diff --git a/.github/CONTRIBUTING.md b/CONTRIBUTING.md similarity index 100% rename from .github/CONTRIBUTING.md rename to CONTRIBUTING.md diff --git a/Vagrantfile b/Vagrantfile index 454d114f1a2..4f8ee7164f6 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -23,15 +23,15 @@ Vagrant.configure(2) do |config| config.vm.define "ubuntu-1204" do |config| - config.vm.box = "ubuntu/precise64" + config.vm.box = "elastic/ubuntu-12.04-x86_64" ubuntu_common config end config.vm.define "ubuntu-1404" do |config| - config.vm.box = "ubuntu/trusty64" + config.vm.box = "elastic/ubuntu-14.04-x86_64" ubuntu_common config end config.vm.define "ubuntu-1504" do |config| - config.vm.box = "ubuntu/vivid64" + config.vm.box = "elastic/ubuntu-15.04-x86_64" ubuntu_common config, extra: <<-SHELL # Install Jayatana so we can work around it being present. [ -f /usr/share/java/jayatanaag.jar ] || install jayatana @@ -41,44 +41,35 @@ Vagrant.configure(2) do |config| # get the sun jdk on there just aren't worth it. We have jessie for testing # debian and it works fine. config.vm.define "debian-8" do |config| - config.vm.box = "debian/jessie64" - deb_common config, - 'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports' + config.vm.box = "elastic/debian-8-x86_64" + deb_common config, 'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports' end config.vm.define "centos-6" do |config| - config.vm.box = "boxcutter/centos67" + config.vm.box = "elastic/centos-6-x86_64" rpm_common config end config.vm.define "centos-7" do |config| - # There is a centos/7 box but it doesn't have rsync or virtualbox guest - # stuff on there so its slow to use. So chef it is.... - config.vm.box = "boxcutter/centos71" + config.vm.box = "elastic/centos-7-x86_64" + rpm_common config + end + config.vm.define "oel-6" do |config| + config.vm.box = "elastic/oraclelinux-6-x86_64" rpm_common config end - # This box hangs _forever_ on ```yum check-update```. I have no idea why. - # config.vm.define "oel-6", autostart: false do |config| - # config.vm.box = "boxcutter/oel66" - # rpm_common(config) - # end config.vm.define "oel-7" do |config| - config.vm.box = "boxcutter/oel70" + config.vm.box = "elastic/oraclelinux-7-x86_64" rpm_common config end config.vm.define "fedora-22" do |config| - # Fedora hosts their own 'cloud' images that aren't in Vagrant's Atlas but - # and are missing required stuff like rsync. It'd be nice if we could use - # them but they much slower to get up and running then the boxcutter image. 
- config.vm.box = "boxcutter/fedora22" + config.vm.box = "elastic/fedora-22-x86_64" dnf_common config end config.vm.define "opensuse-13" do |config| - config.vm.box = "chef/opensuse-13" - config.vm.box_url = "http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_opensuse-13.2-x86_64_chef-provisionerless.box" + config.vm.box = "elastic/opensuse-13-x86_64" opensuse_common config end - # The SLES boxes are not considered to be highest quality, but seem to be sufficient for a test run config.vm.define "sles-12" do |config| - config.vm.box = "idar/sles12" + config.vm.box = "elastic/sles-12-x86_64" sles_common config end # Switch the default share for the project root from /vagrant to diff --git a/build.gradle b/build.gradle index b419bf01e15..6ab00d73881 100644 --- a/build.gradle +++ b/build.gradle @@ -116,6 +116,7 @@ subprojects { "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar', "org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm', "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb', + "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage', ] configurations.all { resolutionStrategy.dependencySubstitution { DependencySubstitutions subs -> diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index ca78157bcf2..598be546f26 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -307,6 +307,12 @@ class BuildPlugin implements Plugin { /** Adds repositores used by ES dependencies */ static void configureRepositories(Project project) { RepositoryHandler repos = project.repositories + if (System.getProperty("repos.mavenlocal") != null) { + // with -Drepos.mavenlocal=true we can force checking the local .m2 repo which is + // useful for development ie. 
bwc tests where we install stuff in the local repository + // such that we don't have to pass hardcoded files to gradle + repos.mavenLocal() + } repos.mavenCentral() repos.maven { name 'sonatype-snapshots' @@ -407,6 +413,7 @@ class BuildPlugin implements Plugin { systemProperty 'jna.nosys', 'true' // default test sysprop values systemProperty 'tests.ifNoTests', 'fail' + // TODO: remove setting logging level via system property systemProperty 'es.logger.level', 'WARN' for (Map.Entry property : System.properties.entrySet()) { if (property.getKey().startsWith('tests.') || diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index bdb563e001b..b04f959e068 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -68,7 +68,7 @@ public class PluginBuildPlugin extends BuildPlugin { testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}" // we "upgrade" these optional deps to provided for plugins, since they will run // with a full elasticsearch server that includes optional deps - provided "com.spatial4j:spatial4j:${project.versions.spatial4j}" + provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}" provided "com.vividsolutions:jts:${project.versions.jts}" provided "log4j:log4j:${project.versions.log4j}" provided "log4j:apache-log4j-extras:${project.versions.log4j}" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy index 7b525d39f53..b5128817fb0 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy @@ -68,11 +68,17 @@ class PluginPropertiesTask extends Copy { } Map generateSubstitutions() { + def stringSnap = { version -> + if (version.endsWith("-SNAPSHOT")) { + return version.substring(0, version.length() - 9) + } + return version + } return [ 'name': extension.name, 'description': extension.description, - 'version': extension.version, - 'elasticsearchVersion': VersionProperties.elasticsearch, + 'version': stringSnap(extension.version), + 'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch), 'javaVersion': project.targetCompatibility as String, 'isolated': extension.isolated as String, 'classname': extension.classname diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy new file mode 100644 index 00000000000..b280a74db58 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy @@ -0,0 +1,98 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.precommit + +import org.elasticsearch.gradle.LoggedExec +import org.gradle.api.file.FileCollection +import org.gradle.api.tasks.InputFiles +import org.gradle.api.tasks.OutputFile + +/** + * Runs LoggerUsageCheck on a set of directories. + */ +public class LoggerUsageTask extends LoggedExec { + + /** + * We use a simple "marker" file that we touch when the task succeeds + * as the task output. This is compared against the modified time of the + * inputs (ie the jars/class files). + */ + private File successMarker = new File(project.buildDir, 'markers/loggerUsage') + + private FileCollection classpath; + + private List classDirectories; + + public LoggerUsageTask() { + project.afterEvaluate { + dependsOn(classpath) + description = "Runs LoggerUsageCheck on ${classDirectories}" + executable = new File(project.javaHome, 'bin/java') + if (classDirectories == null) { + classDirectories = [] + if (project.sourceSets.findByName("main") && project.sourceSets.main.output.classesDir.exists()) { + classDirectories += [project.sourceSets.main.output.classesDir] + dependsOn project.tasks.classes + } + if (project.sourceSets.findByName("test") && project.sourceSets.test.output.classesDir.exists()) { + classDirectories += [project.sourceSets.test.output.classesDir] + dependsOn project.tasks.testClasses + } + } + doFirst({ + args('-cp', getClasspath().asPath, 'org.elasticsearch.test.loggerusage.ESLoggerUsageChecker') + getClassDirectories().each { + args it.getAbsolutePath() + } + }) + doLast({ + successMarker.parentFile.mkdirs() + successMarker.setText("", 'UTF-8') + }) + } + } + + @InputFiles + FileCollection getClasspath() { + return classpath + } + + void setClasspath(FileCollection classpath) { + this.classpath = classpath + } + + @InputFiles + List getClassDirectories() { + return classDirectories + } + + void setClassDirectories(List classDirectories) { + this.classDirectories = classDirectories + } + + @OutputFile + File getSuccessMarker() { + return successMarker + } + + void setSuccessMarker(File successMarker) { + this.successMarker = successMarker + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index ab524351274..0d4a51f050a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -34,6 +34,7 @@ class PrecommitTasks { configureForbiddenApis(project), configureCheckstyle(project), configureNamingConventions(project), + configureLoggerUsage(project), project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), project.tasks.create('licenseHeaders', LicenseHeadersTask.class), project.tasks.create('jarHell', JarHellTask.class), @@ -63,21 +64,21 @@ class PrecommitTasks { project.forbiddenApis { internalRuntimeForbidden = true failOnUnsupportedJava = false - bundledSignatures = ['jdk-unsafe', 'jdk-deprecated'] - signaturesURLs = 
[getClass().getResource('/forbidden/all-signatures.txt')] + bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-system-out'] + signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'), + getClass().getResource('/forbidden/es-all-signatures.txt')] suppressAnnotations = ['**.SuppressForbidden'] } Task mainForbidden = project.tasks.findByName('forbiddenApisMain') if (mainForbidden != null) { mainForbidden.configure { - bundledSignatures += 'jdk-system-out' - signaturesURLs += getClass().getResource('/forbidden/core-signatures.txt') + signaturesURLs += getClass().getResource('/forbidden/es-core-signatures.txt') } } Task testForbidden = project.tasks.findByName('forbiddenApisTest') if (testForbidden != null) { testForbidden.configure { - signaturesURLs += getClass().getResource('/forbidden/test-signatures.txt') + signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt') } } Task forbiddenApis = project.tasks.findByName('forbiddenApis') @@ -117,4 +118,18 @@ class PrecommitTasks { } return null } + + private static Task configureLoggerUsage(Project project) { + Task loggerUsageTask = project.tasks.create('loggerUsageCheck', LoggerUsageTask.class) + + project.configurations.create('loggerUsagePlugin') + project.dependencies.add('loggerUsagePlugin', + "org.elasticsearch.test:logger-usage:${org.elasticsearch.gradle.VersionProperties.elasticsearch}") + + loggerUsageTask.configure { + classpath = project.configurations.loggerUsagePlugin + } + + return loggerUsageTask + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index c9db5657ba4..19b41cc8cde 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -23,8 +23,6 @@ import org.gradle.api.Project import org.gradle.api.file.FileCollection import org.gradle.api.tasks.Input -import java.time.LocalDateTime - /** Configuration for an elasticsearch cluster, used for integration tests. */ class ClusterConfiguration { @@ -34,6 +32,12 @@ class ClusterConfiguration { @Input int numNodes = 1 + @Input + int numBwcNodes = 0 + + @Input + String bwcVersion = null + @Input int httpPort = 0 @@ -49,6 +53,15 @@ class ClusterConfiguration { @Input String jvmArgs = System.getProperty('tests.jvm.argline', '') + /** + * The seed nodes port file. In the case the cluster has more than one node we use a seed node + * to form the cluster. The file is null if there is no seed node yet available. + * + * Note: this can only be null if the cluster has only one node or if the first node is not yet + * configured. All nodes but the first node should see a non null value. + */ + File seedNodePortsFile + /** * A closure to call before the cluster is considered ready. The closure is passed the node info, * as well as a groovy AntBuilder, to enable running ant condition checks. 
The default wait @@ -119,4 +132,12 @@ class ClusterConfiguration { } extraConfigFiles.put(path, sourceFile) } + + /** Returns an address and port suitable for a URI to connect to this cluster's seed node over the transport protocol */ + String seedNodeTransportUri() { + if (seedNodePortsFile != null) { + return seedNodePortsFile.readLines("UTF-8").get(0) + } + return null; + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index d96ee511051..a82fefdc510 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -53,13 +53,59 @@ class ClusterFormationTasks { // no need to add cluster formation tasks if the task won't run! return } - configureDistributionDependency(project, config.distribution) - List startTasks = [] + File sharedDir = new File(project.buildDir, "cluster/shared") + // first we remove everything in the shared cluster directory to ensure there are no leftovers in repos or anything + // in theory this should not be necessary but repositories are only deleted in the cluster-state and not on-disk + // such that snapshots survive failures / test runs and there is no simple way today to fix that. + Task cleanup = project.tasks.create(name: "${task.name}#prepareCluster.cleanShared", type: Delete, dependsOn: task.dependsOn.collect()) { + delete sharedDir + doLast { + sharedDir.mkdirs() + } + } + List startTasks = [cleanup] List nodes = [] + if (config.numNodes < config.numBwcNodes) { + throw new GradleException("numNodes must be >= numBwcNodes [${config.numNodes} < ${config.numBwcNodes}]") + } + if (config.numBwcNodes > 0 && config.bwcVersion == null) { + throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0") + } + // this is our current version distribution configuration we use for all kinds of REST tests etc. + project.configurations { + elasticsearchDistro + } + configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchDistro, VersionProperties.elasticsearch) + if (config.bwcVersion != null && config.numBwcNodes > 0) { + // if the cluster includes BWC nodes we also need to configure a dependency on the BWC version + // this version uses the same distribution etc. and only differs in the version we depend on. + // from here on everything else works the same as if it's the current version, we fetch the BWC version + // from mirrors using Gradle's built-in mechanism etc. + project.configurations { + elasticsearchBwcDistro + } + configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion) + } + for (int i = 0; i < config.numNodes; ++i) { - NodeInfo node = new NodeInfo(config, i, project, task) + // we start N nodes and out of these N nodes there might be M bwc nodes.
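+ // (illustrative example, not part of the original change: with numNodes == 3 and numBwcNodes == 2, nodes 0 and 1 would run config.bwcVersion while node 2 runs the current version; the lines below pick the version and distribution configuration per node index)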
+ // for each of those nodes we might have a different configuration + String elasticsearchVersion = VersionProperties.elasticsearch + Configuration configuration = project.configurations.elasticsearchDistro + if (i < config.numBwcNodes) { + elasticsearchVersion = config.bwcVersion + configuration = project.configurations.elasticsearchBwcDistro + } + NodeInfo node = new NodeInfo(config, i, project, task, elasticsearchVersion, sharedDir) + if (i == 0) { + if (config.seedNodePortsFile != null) { + // we might allow this in the future to be set but for now we are the only authority to set this! + throw new GradleException("seedNodePortsFile has a non-null value but first node has not been initialized") + } + config.seedNodePortsFile = node.transportPortsFile; + } nodes.add(node) - startTasks.add(configureNode(project, task, node)) + startTasks.add(configureNode(project, task, cleanup, node, configuration)) } Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks) @@ -70,20 +116,14 @@ } /** Adds a dependency on the given distribution */ - static void configureDistributionDependency(Project project, String distro) { - String elasticsearchVersion = VersionProperties.elasticsearch + static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) { String packaging = distro if (distro == 'tar') { packaging = 'tar.gz' } else if (distro == 'integ-test-zip') { packaging = 'zip' } - project.configurations { - elasticsearchDistro - } - project.dependencies { - elasticsearchDistro "org.elasticsearch.distribution.${distro}:elasticsearch:${elasticsearchVersion}@${packaging}" - } + project.dependencies.add(configuration.name, "org.elasticsearch.distribution.${distro}:elasticsearch:${elasticsearchVersion}@${packaging}") } /** @@ -103,10 +143,10 @@ * * @return a task which starts the node.
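 * (illustrative summary, inferred from the task chain visible below rather than original javadoc: each node's setup runs clean -> checkPrevious -> stopPrevious -> extract -> configure -> extraConfig -> copyPlugins before the start task)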
*/ - static Task configureNode(Project project, Task task, NodeInfo node) { + static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration) { // tasks are chained so their execution order is maintained - Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: task.dependsOn.collect()) { + Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: dependsOn) { delete node.homeDir delete node.cwd doLast { @@ -115,7 +155,7 @@ class ClusterFormationTasks { } setup = configureCheckPreviousTask(taskName(task, node, 'checkPrevious'), project, setup, node) setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node) - setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node) + setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node, configuration) setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node) setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node) setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node) @@ -151,27 +191,28 @@ class ClusterFormationTasks { } /** Adds a task to extract the elasticsearch distribution */ - static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node) { - List extractDependsOn = [project.configurations.elasticsearchDistro, setup] - /* project.configurations.elasticsearchDistro.singleFile will be an - external artifact if this is being run by a plugin not living in the - elasticsearch source tree. If this is a plugin built in the - elasticsearch source tree or this is a distro in the elasticsearch - source tree then this should be the version of elasticsearch built - by the source tree. If it isn't then Bad Things(TM) will happen. */ + static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node, Configuration configuration) { + List extractDependsOn = [configuration, setup] + /* configuration.singleFile will be an external artifact if this is being run by a plugin not living in the + elasticsearch source tree. If this is a plugin built in the elasticsearch source tree or this is a distro in + the elasticsearch source tree then this should be the version of elasticsearch built by the source tree. + If it isn't then Bad Things(TM) will happen. 
*/ Task extract + switch (node.config.distribution) { case 'integ-test-zip': case 'zip': extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { - from { project.zipTree(project.configurations.elasticsearchDistro.singleFile) } + from { + project.zipTree(configuration.singleFile) + } into node.baseDir } break; case 'tar': extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { from { - project.tarTree(project.resources.gzip(project.configurations.elasticsearchDistro.singleFile)) + project.tarTree(project.resources.gzip(configuration.singleFile)) } into node.baseDir } @@ -180,7 +221,7 @@ File rpmDatabase = new File(node.baseDir, 'rpm-database') File rpmExtracted = new File(node.baseDir, 'rpm-extracted') /* Delay reading the location of the rpm file until task execution */ - Object rpm = "${ -> project.configurations.elasticsearchDistro.singleFile}" + Object rpm = "${ -> configuration.singleFile}" extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) { commandLine 'rpm', '--badreloc', '--nodeps', '--noscripts', '--notriggers', '--dbpath', rpmDatabase, @@ -195,7 +236,7 @@ case 'deb': /* Delay reading the location of the deb file until task execution */ File debExtracted = new File(node.baseDir, 'deb-extracted') - Object deb = "${ -> project.configurations.elasticsearchDistro.singleFile}" + Object deb = "${ -> configuration.singleFile}" extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) { commandLine 'dpkg-deb', '-x', deb, debExtracted doFirst { @@ -214,26 +255,28 @@ Map esConfig = [ 'cluster.name' : node.clusterName, 'pidfile' : node.pidFile, - 'path.repo' : "${node.homeDir}/repo", - 'path.shared_data' : "${node.homeDir}/../", + 'path.repo' : "${node.sharedDir}/repo", + 'path.shared_data' : "${node.sharedDir}/", // Define a node attribute so we can test that it exists 'node.testattr' : 'test', 'repositories.url.allowed_urls': 'http://snapshot.test*' ] - if (node.config.numNodes == 1) { - esConfig['http.port'] = node.config.httpPort - esConfig['transport.tcp.port'] = node.config.transportPort - } else { - // TODO: fix multi node so it doesn't use hardcoded prots - esConfig['http.port'] = 9400 + node.nodeNum - esConfig['transport.tcp.port'] = 9500 + node.nodeNum - esConfig['discovery.zen.ping.unicast.hosts'] = (0..<node.config.numNodes).collect{"127.0.0.1:${9500 + it}"}.join(',') - } + esConfig['http.port'] = node.config.httpPort + esConfig['transport.tcp.port'] = node.config.transportPort + if (node.nodeNum > 0) { // multi-node cluster case, we have to wait for the seed node to startup + ant.waitfor(maxwait: '20', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') { + resourceexists { + file(file: node.config.seedNodePortsFile.toString()) + } + } + // the seed node is enough to form the cluster - all subsequent nodes will get the seed node as a unicast + // host and join the cluster via that.
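+ // (illustrative, not part of the original change: the generated elasticsearch.yml then contains a line such as discovery.zen.ping.unicast.hosts: "127.0.0.1:9300", where the host:port value is read from the first line of the seed node's transport ports file; the address shown here is hypothetical)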
+ esConfig['discovery.zen.ping.unicast.hosts'] = "\"${node.config.seedNodeTransportUri()}\"" + } File configFile = new File(node.confDir, 'elasticsearch.yml') logger.info("Configuring ${configFile}") configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8') diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index b41b1822000..f68084c61fe 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -40,6 +40,9 @@ class NodeInfo { /** root directory all node files and operations happen under */ File baseDir + /** shared data directory all nodes share */ + File sharedDir + /** the pid file the node will use */ File pidFile @@ -89,14 +92,15 @@ class NodeInfo { ByteArrayOutputStream buffer = new ByteArrayOutputStream() /** Creates a node to run as part of a cluster for the given task */ - NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task) { + NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task, String nodeVersion, File sharedDir) { this.config = config this.nodeNum = nodeNum + this.sharedDir = sharedDir clusterName = "${task.path.replace(':', '_').substring(1)}" baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}") pidFile = new File(baseDir, 'es.pid') - homeDir = homeDir(baseDir, config.distribution) - confDir = confDir(baseDir, config.distribution) + homeDir = homeDir(baseDir, config.distribution, nodeVersion) + confDir = confDir(baseDir, config.distribution, nodeVersion) configFile = new File(confDir, 'elasticsearch.yml') // even for rpm/deb, the logs are under home because we dont start with real services File logsDir = new File(homeDir, 'logs') @@ -129,14 +133,15 @@ class NodeInfo { 'JAVA_HOME' : project.javaHome, 'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc ] - args.add("-Des.node.portsfile=true") - args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" }) + args.addAll("-E", "es.node.portsfile=true") + env.put('ES_JAVA_OPTS', config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")) for (Map.Entry property : System.properties.entrySet()) { if (property.getKey().startsWith('es.')) { - args.add("-D${property.getKey()}=${property.getValue()}") + args.add("-E") + args.add("${property.getKey()}=${property.getValue()}") } } - args.add("-Des.path.conf=${confDir}") + args.addAll("-E", "es.path.conf=${confDir}") if (Os.isFamily(Os.FAMILY_WINDOWS)) { args.add('"') // end the entire command, quoted } @@ -181,13 +186,13 @@ class NodeInfo { } /** Returns the directory elasticsearch home is contained in for the given distribution */ - static File homeDir(File baseDir, String distro) { + static File homeDir(File baseDir, String distro, String nodeVersion) { String path switch (distro) { case 'integ-test-zip': case 'zip': case 'tar': - path = "elasticsearch-${VersionProperties.elasticsearch}" + path = "elasticsearch-${nodeVersion}" break case 'rpm': case 'deb': @@ -199,12 +204,12 @@ class NodeInfo { return new File(baseDir, path) } - static File confDir(File baseDir, String distro) { + static File confDir(File baseDir, String distro, String nodeVersion) { switch (distro) { case 'integ-test-zip': case 'zip': case 'tar': - return new File(homeDir(baseDir, distro), 'config') + return 
new File(homeDir(baseDir, distro, nodeVersion), 'config') case 'rpm': case 'deb': return new File(baseDir, "${distro}-extracted/etc/elasticsearch") diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 60a11d951f9..46ddcb0ad0e 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml [15 hunks, each deleting <suppress .../> entries, omitted: the XML elements themselves were lost in extraction] diff --git a/buildSrc/src/main/resources/forbidden/es-all-signatures.txt b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt new file mode 100644 index 00000000000..d258c098911 --- /dev/null +++ b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt @@ -0,0 +1,30 @@ +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on +# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead. +java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.
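+# (illustrative note, not part of the original file: entries follow the forbidden-apis syntax "class#member(arguments) @ message"; constructors are written as Class#<init>(arguments), as in the entries below)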
+ +java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057 +java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057 + +@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness +java.util.Random#<init>() +java.util.concurrent.ThreadLocalRandom + +java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests + +@defaultMessage this should not have been added to lucene in the first place +org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey() diff --git a/buildSrc/src/main/resources/forbidden/core-signatures.txt b/buildSrc/src/main/resources/forbidden/es-core-signatures.txt similarity index 80% rename from buildSrc/src/main/resources/forbidden/core-signatures.txt rename to buildSrc/src/main/resources/forbidden/es-core-signatures.txt index c6ab430595c..059be403a67 100644 --- a/buildSrc/src/main/resources/forbidden/core-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/es-core-signatures.txt @@ -41,14 +41,10 @@ org.apache.lucene.index.IndexReader#addReaderClosedListener(org.apache.lucene.index.IndexReader$ReaderClosedListener) org.apache.lucene.index.IndexReader#removeReaderClosedListener(org.apache.lucene.index.IndexReader$ReaderClosedListener) @defaultMessage Pass the precision step from the mappings explicitly instead -org.apache.lucene.search.NumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean) -org.apache.lucene.search.NumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean) -org.apache.lucene.search.NumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean) -org.apache.lucene.search.NumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean) -org.apache.lucene.search.NumericRangeFilter#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean) -org.apache.lucene.search.NumericRangeFilter#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean) -org.apache.lucene.search.NumericRangeFilter#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean) -org.apache.lucene.search.NumericRangeFilter#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean) +org.apache.lucene.search.LegacyNumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean) +org.apache.lucene.search.LegacyNumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean) +org.apache.lucene.search.LegacyNumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean) +org.apache.lucene.search.LegacyNumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean) @defaultMessage Only use wait / notify when really needed; try to use concurrency primitives, latches or callbacks instead.
java.lang.Object#wait() @@ -88,9 +84,6 @@ java.util.concurrent.Future#cancel(boolean) org.elasticsearch.common.io.PathUtils#get(java.lang.String, java.lang.String[]) org.elasticsearch.common.io.PathUtils#get(java.net.URI) -@defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead -org.apache.lucene.search.Query#setBoost(float) - @defaultMessage Constructing a DateTime without a time zone is dangerous org.joda.time.DateTime#<init>() org.joda.time.DateTime#<init>(long) diff --git a/buildSrc/src/main/resources/forbidden/test-signatures.txt b/buildSrc/src/main/resources/forbidden/es-test-signatures.txt similarity index 100% rename from buildSrc/src/main/resources/forbidden/test-signatures.txt rename to buildSrc/src/main/resources/forbidden/es-test-signatures.txt diff --git a/buildSrc/src/main/resources/forbidden/all-signatures.txt b/buildSrc/src/main/resources/forbidden/jdk-signatures.txt similarity index 77% rename from buildSrc/src/main/resources/forbidden/all-signatures.txt rename to buildSrc/src/main/resources/forbidden/jdk-signatures.txt index 3c56a03b293..994b1ad3a4a 100644 --- a/buildSrc/src/main/resources/forbidden/all-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/jdk-signatures.txt @@ -33,23 +33,6 @@ java.util.Formatter#<init>(java.lang.String,java.lang.String,java.util.Locale) java.io.RandomAccessFile java.nio.file.Path#toFile() -@defaultMessage Don't use deprecated lucene apis -org.apache.lucene.index.DocsEnum -org.apache.lucene.index.DocsAndPositionsEnum -org.apache.lucene.queries.TermFilter -org.apache.lucene.queries.TermsFilter -org.apache.lucene.search.Filter -org.apache.lucene.search.FilteredQuery -org.apache.lucene.search.TermRangeFilter -org.apache.lucene.search.NumericRangeFilter -org.apache.lucene.search.PrefixFilter -org.apache.lucene.search.QueryWrapperFilter -org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter -org.apache.lucene.index.IndexWriter#isLocked(org.apache.lucene.store.Directory) - -java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead. -java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead. - @defaultMessage Specify a location for the temp file/directory instead. java.nio.file.Files#createTempDirectory(java.lang.String,java.nio.file.attribute.FileAttribute[]) java.nio.file.Files#createTempFile(java.lang.String,java.lang.String,java.nio.file.attribute.FileAttribute[]) @@ -62,9 +45,6 @@ java.io.ObjectInput java.nio.file.Files#isHidden(java.nio.file.Path) @ Dependent on the operating system, use FileSystemUtils.isHidden instead -java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057 -java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057 - @defaultMessage Resolve hosts explicitly to the address(es) you want with InetAddress.
java.net.InetSocketAddress#<init>(java.lang.String,int) java.net.Socket#<init>(java.lang.String,int) @@ -103,9 +83,6 @@ java.lang.Class#getDeclaredMethods() @ Do not violate java's access system: Use java.lang.reflect.AccessibleObject#setAccessible(boolean) java.lang.reflect.AccessibleObject#setAccessible(java.lang.reflect.AccessibleObject[], boolean) -@defaultMessage this should not have been added to lucene in the first place -org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey() - @defaultMessage this method needs special permission java.lang.Thread#getAllStackTraces() @@ -126,8 +103,3 @@ java.util.Collections#EMPTY_MAP java.util.Collections#EMPTY_SET java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness -@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness -java.util.Random#<init>() -java.util.concurrent.ThreadLocalRandom - -java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 54b16db2cb1..39c32192052 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,8 +1,8 @@ elasticsearch = 5.0.0 -lucene = 5.5.0 +lucene = 6.0.0-snapshot-f0aa4fc # optional dependencies -spatial4j = 0.5 +spatial4j = 0.6 jts = 1.13 jackson = 2.7.1 log4j = 1.2.17 diff --git a/core/build.gradle b/core/build.gradle index ac3f421211d..ab3754e72ff 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -42,13 +42,14 @@ dependencies { compile "org.apache.lucene:lucene-queryparser:${versions.lucene}" compile "org.apache.lucene:lucene-sandbox:${versions.lucene}" compile "org.apache.lucene:lucene-spatial:${versions.lucene}" + compile "org.apache.lucene:lucene-spatial-extras:${versions.lucene}" compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}" compile "org.apache.lucene:lucene-suggest:${versions.lucene}" compile 'org.elasticsearch:securesm:1.0' // utilities - compile 'commons-cli:commons-cli:1.3.1' + compile 'net.sf.jopt-simple:jopt-simple:4.9' compile 'com.carrotsearch:hppc:0.7.1' // time handling, remove with java 8 time @@ -71,7 +72,7 @@ dependencies { compile 'org.hdrhistogram:HdrHistogram:2.1.6' // lucene spatial - compile "com.spatial4j:spatial4j:${versions.spatial4j}", optional + compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional compile "com.vividsolutions:jts:${versions.jts}", optional // logging @@ -168,11 +169,6 @@ thirdPartyAudit.excludes = [ 'org.apache.commons.logging.Log', 'org.apache.commons.logging.LogFactory', - // from org.apache.lucene.sandbox.queries.regex.JakartaRegexpCapabilities$JakartaRegexMatcher (lucene-sandbox) - 'org.apache.regexp.CharacterIterator', - 'org.apache.regexp.RE', - 'org.apache.regexp.REProgram', - // from org.jboss.netty.handler.ssl.OpenSslEngine (netty) 'org.apache.tomcat.jni.Buffer', 'org.apache.tomcat.jni.Library', @@ -210,7 +206,7 @@ thirdPartyAudit.excludes = [ 'org.jboss.marshalling.MarshallingConfiguration', 'org.jboss.marshalling.Unmarshaller', - // from com.spatial4j.core.io.GeoJSONReader (spatial4j) + // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j) 'org.noggit.JSONParser', // from org.jboss.netty.container.osgi.NettyBundleActivator (netty) diff --git a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index 798fac01a7a..4e24944ffac 100644 ---
a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -33,7 +33,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.InPlaceMergeSorter; -import org.apache.lucene.util.ToStringUtils; import java.io.IOException; import java.util.ArrayList; @@ -247,14 +246,15 @@ public abstract class BlendedTermQuery extends Query { if (boosts != null) { boost = boosts[i]; } - builder.append(ToStringUtils.boost(boost)); + if (boost != 1f) { + builder.append('^').append(boost); + } builder.append(", "); } if (terms.length > 0) { builder.setLength(builder.length() - 2); } builder.append("])"); - builder.append(ToStringUtils.boost(getBoost())); return builder.toString(); } diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index f153cd53c55..6ddd7591caa 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; @@ -165,7 +166,7 @@ public class MapperQueryParser extends QueryParser { } if (clauses.size() == 0) // happens for stopwords return null; - return getBooleanQuery(clauses, true); + return getBooleanQueryCoordDisabled(clauses); } } else { return getFieldQuerySingle(field, queryText, quoted); @@ -267,7 +268,7 @@ public class MapperQueryParser extends QueryParser { } if (clauses.size() == 0) // happens for stopwords return null; - return getBooleanQuery(clauses, true); + return getBooleanQueryCoordDisabled(clauses); } } else { return super.getFieldQuery(field, queryText, slop); @@ -318,7 +319,7 @@ public class MapperQueryParser extends QueryParser { } if (clauses.size() == 0) // happens for stopwords return null; - return getBooleanQuery(clauses, true); + return getBooleanQueryCoordDisabled(clauses); } } @@ -380,7 +381,7 @@ public class MapperQueryParser extends QueryParser { clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD)); } } - return getBooleanQuery(clauses, true); + return getBooleanQueryCoordDisabled(clauses); } } else { return getFuzzyQuerySingle(field, termStr, minSimilarity); @@ -445,7 +446,7 @@ public class MapperQueryParser extends QueryParser { } if (clauses.size() == 0) // happens for stopwords return null; - return getBooleanQuery(clauses, true); + return getBooleanQueryCoordDisabled(clauses); } } else { return getPrefixQuerySingle(field, termStr); @@ -520,7 +521,7 @@ public class MapperQueryParser extends QueryParser { for (String token : tlist) { clauses.add(new BooleanClause(super.getPrefixQuery(field, token), BooleanClause.Occur.SHOULD)); } - return getBooleanQuery(clauses, true); + return getBooleanQueryCoordDisabled(clauses); } } @@ -575,7 +576,7 @@ public class MapperQueryParser extends QueryParser { } if (clauses.size() == 0) // happens for stopwords return null; - return getBooleanQuery(clauses, true); + return 
getBooleanQueryCoordDisabled(clauses); } } else { return getWildcardQuerySingle(field, termStr); @@ -704,7 +705,7 @@ } if (clauses.size() == 0) // happens for stopwords return null; - return getBooleanQuery(clauses, true); + return getBooleanQueryCoordDisabled(clauses); } } else { return getRegexpQuerySingle(field, termStr); @@ -739,10 +740,24 @@ setAnalyzer(oldAnalyzer); } } + + /** + * @deprecated review all use of this, don't rely on coord + */ + @Deprecated + protected Query getBooleanQueryCoordDisabled(List<BooleanClause> clauses) throws ParseException { + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + builder.setDisableCoord(true); + for (BooleanClause clause : clauses) { + builder.add(clause); + } + return fixNegativeQueryIfNeeded(builder.build()); + } + @Override - protected Query getBooleanQuery(List<BooleanClause> clauses, boolean disableCoord) throws ParseException { - Query q = super.getBooleanQuery(clauses, disableCoord); + protected Query getBooleanQuery(List<BooleanClause> clauses) throws ParseException { + Query q = super.getBooleanQuery(clauses); if (q == null) { return null; } @@ -769,12 +784,12 @@ } pq = builder.build(); //make sure that the boost hasn't been set beforehand, otherwise we'd lose it - assert q.getBoost() == 1f; assert q instanceof BoostQuery == false; return pq; } else if (q instanceof MultiPhraseQuery) { - ((MultiPhraseQuery) q).setSlop(slop); - return q; + MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder((MultiPhraseQuery) q); + builder.setSlop(slop); + return builder.build(); } else { return q; } diff --git a/core/src/main/java/org/apache/lucene/search/XFilteredDocIdSetIterator.java b/core/src/main/java/org/apache/lucene/search/XFilteredDocIdSetIterator.java index 92f2f443f0a..8d1617d3ab4 100644 --- a/core/src/main/java/org/apache/lucene/search/XFilteredDocIdSetIterator.java +++ b/core/src/main/java/org/apache/lucene/search/XFilteredDocIdSetIterator.java @@ -26,8 +26,7 @@ import java.io.IOException; /** * Abstract decorator class of a DocIdSetIterator * implementation that provides on-demand filter/validation - * mechanism on an underlying DocIdSetIterator. See {@link - * FilteredDocIdSet}. + * mechanism on an underlying DocIdSetIterator.
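+ * (illustrative note: concrete subclasses are assumed to supply a per-document match predicate that decides which hits from the wrapped iterator are kept)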
*/ public abstract class XFilteredDocIdSetIterator extends DocIdSetIterator { protected DocIdSetIterator _innerIter; diff --git a/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java index 11b56bdcfe1..3c0bda97347 100644 --- a/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import java.io.IOException; import java.util.Collection; -import java.util.List; /** * @@ -68,7 +67,7 @@ public class CustomFieldQuery extends FieldQuery { flatten(((FiltersFunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries, boost); } else if (sourceQuery instanceof MultiPhraseQuery) { MultiPhraseQuery q = ((MultiPhraseQuery) sourceQuery); - convertMultiPhraseQuery(0, new int[q.getTermArrays().size()], q, q.getTermArrays(), q.getPositions(), reader, flatQueries); + convertMultiPhraseQuery(0, new int[q.getTermArrays().length], q, q.getTermArrays(), q.getPositions(), reader, flatQueries); } else if (sourceQuery instanceof BlendedTermQuery) { final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery; flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost); @@ -77,7 +76,7 @@ } } - private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, List<Term[]> terms, int[] pos, IndexReader reader, Collection<Query> flatQueries) throws IOException { + private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, Term[][] terms, int[] pos, IndexReader reader, Collection<Query> flatQueries) throws IOException { if (currentPos == 0) { // if we have more than 16 terms int numTerms = 0; @@ -87,7 +86,7 @@ if (numTerms > 16) { for (Term[] currentPosTerm : terms) { for (Term term : currentPosTerm) { - super.flatten(new TermQuery(term), reader, flatQueries, orig.getBoost()); + super.flatten(new TermQuery(term), reader, flatQueries, 1F); } } return; @@ -97,16 +96,16 @@ * we walk all possible ways and for each path down the MPQ we create a PhraseQuery; this is what FieldQuery supports. * It seems expensive but most queries will be pretty small.
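 * (illustrative example, not original javadoc: term arrays [[big, large], [house]] expand into the two phrase queries "big house" and "large house")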
*/ - if (currentPos == terms.size()) { + if (currentPos == terms.length) { PhraseQuery.Builder queryBuilder = new PhraseQuery.Builder(); queryBuilder.setSlop(orig.getSlop()); for (int i = 0; i < termsIdx.length; i++) { - queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]); + queryBuilder.add(terms[i][termsIdx[i]], pos[i]); } Query query = queryBuilder.build(); - this.flatten(query, reader, flatQueries, orig.getBoost()); + this.flatten(query, reader, flatQueries, 1F); } else { - Term[] t = terms.get(currentPos); + Term[] t = terms[currentPos]; for (int i = 0; i < t.length; i++) { termsIdx[currentPos] = i; convertMultiPhraseQuery(currentPos+1, termsIdx, orig, terms, pos, reader, flatQueries); diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java index d069bddfdfe..7fd81f5ddfe 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -645,8 +645,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte // 87 used to be for MergeMappingException INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class, org.elasticsearch.indices.InvalidIndexTemplateException::new, 88), - PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class, - org.elasticsearch.percolator.PercolateException::new, 89), REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90), AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class, diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 1c9a5464bb2..8b65adf170d 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -35,212 +35,10 @@ import java.io.IOException; @SuppressWarnings("deprecation") public class Version { - // The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is Beta/RC indicator - // AA values below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release + // The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator + // AA values below 25 are for alpha builds (since 5.0), values from 25 to 49 are beta builds, and values below 99 are RC builds, with 99 indicating a release // the (internal) format of the id is there so we can easily do after/before checks on the id - // NOTE: indexes created with 3.6 use this constant for e.g.
analysis chain emulation (imperfect) - public static final org.apache.lucene.util.Version LUCENE_3_EMULATION_VERSION = org.apache.lucene.util.Version.LUCENE_4_0_0; - - public static final int V_0_18_0_ID = /*00*/180099; - public static final Version V_0_18_0 = new Version(V_0_18_0_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_18_1_ID = /*00*/180199; - public static final Version V_0_18_1 = new Version(V_0_18_1_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_18_2_ID = /*00*/180299; - public static final Version V_0_18_2 = new Version(V_0_18_2_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_18_3_ID = /*00*/180399; - public static final Version V_0_18_3 = new Version(V_0_18_3_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_18_4_ID = /*00*/180499; - public static final Version V_0_18_4 = new Version(V_0_18_4_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_18_5_ID = /*00*/180599; - public static final Version V_0_18_5 = new Version(V_0_18_5_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_18_6_ID = /*00*/180699; - public static final Version V_0_18_6 = new Version(V_0_18_6_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_18_7_ID = /*00*/180799; - public static final Version V_0_18_7 = new Version(V_0_18_7_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_18_8_ID = /*00*/180899; - public static final Version V_0_18_8 = new Version(V_0_18_8_ID, LUCENE_3_EMULATION_VERSION); - - public static final int V_0_19_0_RC1_ID = /*00*/190051; - public static final Version V_0_19_0_RC1 = new Version(V_0_19_0_RC1_ID, LUCENE_3_EMULATION_VERSION); - - public static final int V_0_19_0_RC2_ID = /*00*/190052; - public static final Version V_0_19_0_RC2 = new Version(V_0_19_0_RC2_ID, LUCENE_3_EMULATION_VERSION); - - public static final int V_0_19_0_RC3_ID = /*00*/190053; - public static final Version V_0_19_0_RC3 = new Version(V_0_19_0_RC3_ID, LUCENE_3_EMULATION_VERSION); - - public static final int V_0_19_0_ID = /*00*/190099; - public static final Version V_0_19_0 = new Version(V_0_19_0_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_19_1_ID = /*00*/190199; - public static final Version V_0_19_1 = new Version(V_0_19_1_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_19_2_ID = /*00*/190299; - public static final Version V_0_19_2 = new Version(V_0_19_2_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_19_3_ID = /*00*/190399; - public static final Version V_0_19_3 = new Version(V_0_19_3_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_19_4_ID = /*00*/190499; - public static final Version V_0_19_4 = new Version(V_0_19_4_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_19_5_ID = /*00*/190599; - public static final Version V_0_19_5 = new Version(V_0_19_5_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_19_6_ID = /*00*/190699; - public static final Version V_0_19_6 = new Version(V_0_19_6_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_19_7_ID = /*00*/190799; - public static final Version V_0_19_7 = new Version(V_0_19_7_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_19_8_ID = /*00*/190899; - public static final Version V_0_19_8 = new Version(V_0_19_8_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_19_9_ID = /*00*/190999; - public static final Version V_0_19_9 = new Version(V_0_19_9_ID, LUCENE_3_EMULATION_VERSION); - public static final int V_0_19_10_ID = /*00*/191099; - public 
 static final Version V_0_19_10 = new Version(V_0_19_10_ID, LUCENE_3_EMULATION_VERSION);
-    public static final int V_0_19_11_ID = /*00*/191199;
-    public static final Version V_0_19_11 = new Version(V_0_19_11_ID, LUCENE_3_EMULATION_VERSION);
-    public static final int V_0_19_12_ID = /*00*/191299;
-    public static final Version V_0_19_12 = new Version(V_0_19_12_ID, LUCENE_3_EMULATION_VERSION);
-    public static final int V_0_19_13_ID = /*00*/191399;
-    public static final Version V_0_19_13 = new Version(V_0_19_13_ID, LUCENE_3_EMULATION_VERSION);
-
-    public static final int V_0_20_0_RC1_ID = /*00*/200051;
-    public static final Version V_0_20_0_RC1 = new Version(V_0_20_0_RC1_ID, LUCENE_3_EMULATION_VERSION);
-    public static final int V_0_20_0_ID = /*00*/200099;
-    public static final Version V_0_20_0 = new Version(V_0_20_0_ID, LUCENE_3_EMULATION_VERSION);
-    public static final int V_0_20_1_ID = /*00*/200199;
-    public static final Version V_0_20_1 = new Version(V_0_20_1_ID, LUCENE_3_EMULATION_VERSION);
-    public static final int V_0_20_2_ID = /*00*/200299;
-    public static final Version V_0_20_2 = new Version(V_0_20_2_ID, LUCENE_3_EMULATION_VERSION);
-    public static final int V_0_20_3_ID = /*00*/200399;
-    public static final Version V_0_20_3 = new Version(V_0_20_3_ID, LUCENE_3_EMULATION_VERSION);
-    public static final int V_0_20_4_ID = /*00*/200499;
-    public static final Version V_0_20_4 = new Version(V_0_20_4_ID, LUCENE_3_EMULATION_VERSION);
-    public static final int V_0_20_5_ID = /*00*/200599;
-    public static final Version V_0_20_5 = new Version(V_0_20_5_ID, LUCENE_3_EMULATION_VERSION);
-    public static final int V_0_20_6_ID = /*00*/200699;
-    public static final Version V_0_20_6 = new Version(V_0_20_6_ID, LUCENE_3_EMULATION_VERSION);
-
-    public static final int V_0_90_0_Beta1_ID = /*00*/900001;
-    public static final Version V_0_90_0_Beta1 = new Version(V_0_90_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
-    public static final int V_0_90_0_RC1_ID = /*00*/900051;
-    public static final Version V_0_90_0_RC1 = new Version(V_0_90_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
-    public static final int V_0_90_0_RC2_ID = /*00*/900052;
-    public static final Version V_0_90_0_RC2 = new Version(V_0_90_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_2);
-    public static final int V_0_90_0_ID = /*00*/900099;
-    public static final Version V_0_90_0 = new Version(V_0_90_0_ID, org.apache.lucene.util.Version.LUCENE_4_2);
-    public static final int V_0_90_1_ID = /*00*/900199;
-    public static final Version V_0_90_1 = new Version(V_0_90_1_ID, org.apache.lucene.util.Version.LUCENE_4_3);
-    public static final int V_0_90_2_ID = /*00*/900299;
-    public static final Version V_0_90_2 = new Version(V_0_90_2_ID, org.apache.lucene.util.Version.LUCENE_4_3);
-    public static final int V_0_90_3_ID = /*00*/900399;
-    public static final Version V_0_90_3 = new Version(V_0_90_3_ID, org.apache.lucene.util.Version.LUCENE_4_4);
-    public static final int V_0_90_4_ID = /*00*/900499;
-    public static final Version V_0_90_4 = new Version(V_0_90_4_ID, org.apache.lucene.util.Version.LUCENE_4_4);
-    public static final int V_0_90_5_ID = /*00*/900599;
-    public static final Version V_0_90_5 = new Version(V_0_90_5_ID, org.apache.lucene.util.Version.LUCENE_4_4);
-    public static final int V_0_90_6_ID = /*00*/900699;
-    public static final Version V_0_90_6 = new Version(V_0_90_6_ID, org.apache.lucene.util.Version.LUCENE_4_5);
-    public static final int V_0_90_7_ID = /*00*/900799;
-    public static final Version V_0_90_7 = new Version(V_0_90_7_ID, org.apache.lucene.util.Version.LUCENE_4_5);
-    public static final int V_0_90_8_ID = /*00*/900899;
-    public static final Version V_0_90_8 = new Version(V_0_90_8_ID, org.apache.lucene.util.Version.LUCENE_4_6);
-    public static final int V_0_90_9_ID = /*00*/900999;
-    public static final Version V_0_90_9 = new Version(V_0_90_9_ID, org.apache.lucene.util.Version.LUCENE_4_6);
-    public static final int V_0_90_10_ID = /*00*/901099;
-    public static final Version V_0_90_10 = new Version(V_0_90_10_ID, org.apache.lucene.util.Version.LUCENE_4_6);
-    public static final int V_0_90_11_ID = /*00*/901199;
-    public static final Version V_0_90_11 = new Version(V_0_90_11_ID, org.apache.lucene.util.Version.LUCENE_4_6);
-    public static final int V_0_90_12_ID = /*00*/901299;
-    public static final Version V_0_90_12 = new Version(V_0_90_12_ID, org.apache.lucene.util.Version.LUCENE_4_6);
-    public static final int V_0_90_13_ID = /*00*/901399;
-    public static final Version V_0_90_13 = new Version(V_0_90_13_ID, org.apache.lucene.util.Version.LUCENE_4_6);
-
-    public static final int V_1_0_0_Beta1_ID = 1000001;
-    public static final Version V_1_0_0_Beta1 = new Version(V_1_0_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_5);
-    public static final int V_1_0_0_Beta2_ID = 1000002;
-    public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
-    public static final int V_1_0_0_RC1_ID = 1000051;
-    public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
-    public static final int V_1_0_0_RC2_ID = 1000052;
-    public static final Version V_1_0_0_RC2 = new Version(V_1_0_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
-    public static final int V_1_0_0_ID = 1000099;
-    public static final Version V_1_0_0 = new Version(V_1_0_0_ID, org.apache.lucene.util.Version.LUCENE_4_6);
-    public static final int V_1_0_1_ID = 1000199;
-    public static final Version V_1_0_1 = new Version(V_1_0_1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
-    public static final int V_1_0_2_ID = 1000299;
-    public static final Version V_1_0_2 = new Version(V_1_0_2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
-    public static final int V_1_0_3_ID = 1000399;
-    public static final Version V_1_0_3 = new Version(V_1_0_3_ID, org.apache.lucene.util.Version.LUCENE_4_6);
-    public static final int V_1_1_0_ID = 1010099;
-    public static final Version V_1_1_0 = new Version(V_1_1_0_ID, org.apache.lucene.util.Version.LUCENE_4_7);
-    public static final int V_1_1_1_ID = 1010199;
-    public static final Version V_1_1_1 = new Version(V_1_1_1_ID, org.apache.lucene.util.Version.LUCENE_4_7);
-    public static final int V_1_1_2_ID = 1010299;
-    public static final Version V_1_1_2 = new Version(V_1_1_2_ID, org.apache.lucene.util.Version.LUCENE_4_7);
-    public static final int V_1_2_0_ID = 1020099;
-    public static final Version V_1_2_0 = new Version(V_1_2_0_ID, org.apache.lucene.util.Version.LUCENE_4_8);
-    public static final int V_1_2_1_ID = 1020199;
-    public static final Version V_1_2_1 = new Version(V_1_2_1_ID, org.apache.lucene.util.Version.LUCENE_4_8);
-    public static final int V_1_2_2_ID = 1020299;
-    public static final Version V_1_2_2 = new Version(V_1_2_2_ID, org.apache.lucene.util.Version.LUCENE_4_8);
-    public static final int V_1_2_3_ID = 1020399;
-    public static final Version V_1_2_3 = new Version(V_1_2_3_ID, org.apache.lucene.util.Version.LUCENE_4_8);
-    public static final int V_1_2_4_ID = 1020499;
-    public static final Version V_1_2_4 = new Version(V_1_2_4_ID, org.apache.lucene.util.Version.LUCENE_4_8);
-    public static final int V_1_3_0_ID = 1030099;
-    public static final Version V_1_3_0 = new Version(V_1_3_0_ID, org.apache.lucene.util.Version.LUCENE_4_9);
-    public static final int V_1_3_1_ID = 1030199;
-    public static final Version V_1_3_1 = new Version(V_1_3_1_ID, org.apache.lucene.util.Version.LUCENE_4_9);
-    public static final int V_1_3_2_ID = 1030299;
-    public static final Version V_1_3_2 = new Version(V_1_3_2_ID, org.apache.lucene.util.Version.LUCENE_4_9);
-    public static final int V_1_3_3_ID = 1030399;
-    public static final Version V_1_3_3 = new Version(V_1_3_3_ID, org.apache.lucene.util.Version.LUCENE_4_9);
-    public static final int V_1_3_4_ID = 1030499;
-    public static final Version V_1_3_4 = new Version(V_1_3_4_ID, org.apache.lucene.util.Version.LUCENE_4_9);
-    public static final int V_1_3_5_ID = 1030599;
-    public static final Version V_1_3_5 = new Version(V_1_3_5_ID, org.apache.lucene.util.Version.LUCENE_4_9);
-    public static final int V_1_3_6_ID = 1030699;
-    public static final Version V_1_3_6 = new Version(V_1_3_6_ID, org.apache.lucene.util.Version.LUCENE_4_9);
-    public static final int V_1_3_7_ID = 1030799;
-    public static final Version V_1_3_7 = new Version(V_1_3_7_ID, org.apache.lucene.util.Version.LUCENE_4_9);
-    public static final int V_1_3_8_ID = 1030899;
-    public static final Version V_1_3_8 = new Version(V_1_3_8_ID, org.apache.lucene.util.Version.LUCENE_4_9);
-    public static final int V_1_3_9_ID = 1030999;
-    public static final Version V_1_3_9 = new Version(V_1_3_9_ID, org.apache.lucene.util.Version.LUCENE_4_9);
-    public static final int V_1_4_0_Beta1_ID = 1040001;
-    public static final Version V_1_4_0_Beta1 = new Version(V_1_4_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_10_1);
-    public static final int V_1_4_0_ID = 1040099;
-    public static final Version V_1_4_0 = new Version(V_1_4_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
-    public static final int V_1_4_1_ID = 1040199;
-    public static final Version V_1_4_1 = new Version(V_1_4_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
-    public static final int V_1_4_2_ID = 1040299;
-    public static final Version V_1_4_2 = new Version(V_1_4_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
-    public static final int V_1_4_3_ID = 1040399;
-    public static final Version V_1_4_3 = new Version(V_1_4_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
-    public static final int V_1_4_4_ID = 1040499;
-    public static final Version V_1_4_4 = new Version(V_1_4_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
-    public static final int V_1_4_5_ID = 1040599;
-    public static final Version V_1_4_5 = new Version(V_1_4_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
-    public static final int V_1_5_0_ID = 1050099;
-    public static final Version V_1_5_0 = new Version(V_1_5_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
-    public static final int V_1_5_1_ID = 1050199;
-    public static final Version V_1_5_1 = new Version(V_1_5_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
-    public static final int V_1_5_2_ID = 1050299;
-    public static final Version V_1_5_2 = new Version(V_1_5_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
-    public static final int V_1_6_0_ID = 1060099;
-    public static final Version V_1_6_0 = new Version(V_1_6_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
-    public static final int V_1_6_1_ID = 1060199;
-    public static final Version V_1_6_1 = new Version(V_1_6_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
-    public static final int V_1_6_2_ID = 1060299;
-    public static final Version V_1_6_2 = new Version(V_1_6_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
-    public static final int V_1_7_0_ID = 1070099;
-    public static final Version V_1_7_0 = new Version(V_1_7_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
-    public static final int V_1_7_1_ID = 1070199;
-    public static final Version V_1_7_1 = new Version(V_1_7_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
-    public static final int V_1_7_2_ID = 1070299;
-    public static final Version V_1_7_2 = new Version(V_1_7_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
-    public static final int V_1_7_3_ID = 1070399;
-    public static final Version V_1_7_3 = new Version(V_1_7_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
-    public static final int V_1_7_4_ID = 1070499;
-    public static final Version V_1_7_4 = new Version(V_1_7_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
-    public static final int V_1_7_5_ID = 1070599;
-    public static final Version V_1_7_5 = new Version(V_1_7_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);

     public static final int V_2_0_0_beta1_ID = 2000001;
     public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
@@ -262,10 +60,12 @@ public class Version {
     public static final Version V_2_1_2 = new Version(V_2_1_2_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
     public static final int V_2_2_0_ID = 2020099;
     public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
+    public static final int V_2_2_1_ID = 2020199;
+    public static final Version V_2_2_1 = new Version(V_2_2_1_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
     public static final int V_2_3_0_ID = 2030099;
     public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_5_0_0_ID = 5000099;
-    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
     public static final Version CURRENT = V_5_0_0;

     static {
@@ -283,6 +83,8 @@ public class Version {
                 return V_5_0_0;
             case V_2_3_0_ID:
                 return V_2_3_0;
+            case V_2_2_1_ID:
+                return V_2_2_1;
             case V_2_2_0_ID:
                 return V_2_2_0;
             case V_2_1_2_ID:
@@ -303,198 +105,6 @@ public class Version {
                 return V_2_0_0_beta2;
             case V_2_0_0_beta1_ID:
                 return V_2_0_0_beta1;
-            case V_1_7_5_ID:
-                return V_1_7_5;
-            case V_1_7_4_ID:
-                return V_1_7_4;
-            case V_1_7_3_ID:
-                return V_1_7_3;
-            case V_1_7_2_ID:
-                return V_1_7_2;
-            case V_1_7_1_ID:
-                return V_1_7_1;
-            case V_1_7_0_ID:
-                return V_1_7_0;
-            case V_1_6_2_ID:
-                return V_1_6_2;
-            case V_1_6_1_ID:
-                return V_1_6_1;
-            case V_1_6_0_ID:
-                return V_1_6_0;
-            case V_1_5_2_ID:
-                return V_1_5_2;
-            case V_1_5_1_ID:
-                return V_1_5_1;
-            case V_1_5_0_ID:
-                return V_1_5_0;
-            case V_1_4_5_ID:
-                return V_1_4_5;
-            case V_1_4_4_ID:
-                return V_1_4_4;
-            case V_1_4_3_ID:
-                return V_1_4_3;
-            case V_1_4_2_ID:
-                return V_1_4_2;
-            case V_1_4_1_ID:
-                return V_1_4_1;
-            case V_1_4_0_ID:
-                return V_1_4_0;
-            case V_1_4_0_Beta1_ID:
-                return V_1_4_0_Beta1;
-            case V_1_3_9_ID:
-                return V_1_3_9;
-            case V_1_3_8_ID:
-                return V_1_3_8;
-            case V_1_3_7_ID:
-                return V_1_3_7;
-            case V_1_3_6_ID:
-                return V_1_3_6;
-            case V_1_3_5_ID:
-                return V_1_3_5;
-            case V_1_3_4_ID:
-                return V_1_3_4;
-            case V_1_3_3_ID:
-                return V_1_3_3;
-            case V_1_3_2_ID:
-                return V_1_3_2;
-            case V_1_3_1_ID:
-                return V_1_3_1;
-            case V_1_3_0_ID:
-                return V_1_3_0;
-            case V_1_2_4_ID:
-                return V_1_2_4;
-            case V_1_2_3_ID:
-                return V_1_2_3;
-            case V_1_2_2_ID:
-                return V_1_2_2;
-            case V_1_2_1_ID:
-                return V_1_2_1;
-            case V_1_2_0_ID:
-                return V_1_2_0;
-            case V_1_1_2_ID:
-                return V_1_1_2;
-            case V_1_1_1_ID:
-                return V_1_1_1;
-            case V_1_1_0_ID:
-                return V_1_1_0;
-            case V_1_0_3_ID:
-                return V_1_0_3;
-            case V_1_0_2_ID:
-                return V_1_0_2;
-            case V_1_0_1_ID:
-                return V_1_0_1;
-            case V_1_0_0_ID:
-                return V_1_0_0;
-            case V_1_0_0_RC2_ID:
-                return V_1_0_0_RC2;
-            case V_1_0_0_RC1_ID:
-                return V_1_0_0_RC1;
-            case V_1_0_0_Beta2_ID:
-                return V_1_0_0_Beta2;
-            case V_1_0_0_Beta1_ID:
-                return V_1_0_0_Beta1;
-            case V_0_90_13_ID:
-                return V_0_90_13;
-            case V_0_90_12_ID:
-                return V_0_90_12;
-            case V_0_90_11_ID:
-                return V_0_90_11;
-            case V_0_90_10_ID:
-                return V_0_90_10;
-            case V_0_90_9_ID:
-                return V_0_90_9;
-            case V_0_90_8_ID:
-                return V_0_90_8;
-            case V_0_90_7_ID:
-                return V_0_90_7;
-            case V_0_90_6_ID:
-                return V_0_90_6;
-            case V_0_90_5_ID:
-                return V_0_90_5;
-            case V_0_90_4_ID:
-                return V_0_90_4;
-            case V_0_90_3_ID:
-                return V_0_90_3;
-            case V_0_90_2_ID:
-                return V_0_90_2;
-            case V_0_90_1_ID:
-                return V_0_90_1;
-            case V_0_90_0_ID:
-                return V_0_90_0;
-            case V_0_90_0_RC2_ID:
-                return V_0_90_0_RC2;
-            case V_0_90_0_RC1_ID:
-                return V_0_90_0_RC1;
-            case V_0_90_0_Beta1_ID:
-                return V_0_90_0_Beta1;
-            case V_0_20_6_ID:
-                return V_0_20_6;
-            case V_0_20_5_ID:
-                return V_0_20_5;
-            case V_0_20_4_ID:
-                return V_0_20_4;
-            case V_0_20_3_ID:
-                return V_0_20_3;
-            case V_0_20_2_ID:
-                return V_0_20_2;
-            case V_0_20_1_ID:
-                return V_0_20_1;
-            case V_0_20_0_ID:
-                return V_0_20_0;
-            case V_0_20_0_RC1_ID:
-                return V_0_20_0_RC1;
-            case V_0_19_0_RC1_ID:
-                return V_0_19_0_RC1;
-            case V_0_19_0_RC2_ID:
-                return V_0_19_0_RC2;
-            case V_0_19_0_RC3_ID:
-                return V_0_19_0_RC3;
-            case V_0_19_0_ID:
-                return V_0_19_0;
-            case V_0_19_1_ID:
-                return V_0_19_1;
-            case V_0_19_2_ID:
-                return V_0_19_2;
-            case V_0_19_3_ID:
-                return V_0_19_3;
-            case V_0_19_4_ID:
-                return V_0_19_4;
-            case V_0_19_5_ID:
-                return V_0_19_5;
-            case V_0_19_6_ID:
-                return V_0_19_6;
-            case V_0_19_7_ID:
-                return V_0_19_7;
-            case V_0_19_8_ID:
-                return V_0_19_8;
-            case V_0_19_9_ID:
-                return V_0_19_9;
-            case V_0_19_10_ID:
-                return V_0_19_10;
-            case V_0_19_11_ID:
-                return V_0_19_11;
-            case V_0_19_12_ID:
-                return V_0_19_12;
-            case V_0_19_13_ID:
-                return V_0_19_13;
-            case V_0_18_0_ID:
-                return V_0_18_0;
-            case V_0_18_1_ID:
-                return V_0_18_1;
-            case V_0_18_2_ID:
-                return V_0_18_2;
-            case V_0_18_3_ID:
-                return V_0_18_3;
-            case V_0_18_4_ID:
-                return V_0_18_4;
-            case V_0_18_5_ID:
-                return V_0_18_5;
-            case V_0_18_6_ID:
-                return V_0_18_6;
-            case V_0_18_7_ID:
-                return V_0_18_7;
-            case V_0_18_8_ID:
-                return V_0_18_8;
             default:
                 return new Version(id, org.apache.lucene.util.Version.LATEST);
         }
@@ -531,15 +141,23 @@ public class Version {
         if (!Strings.hasLength(version)) {
             return Version.CURRENT;
         }
+        final boolean snapshot; // this is some BWC for 2.x and before indices
+        if (snapshot = version.endsWith("-SNAPSHOT")) {
+            version = version.substring(0, version.length() - 9);
+        }
         String[] parts = version.split("\\.|\\-");
         if (parts.length < 3 || parts.length > 4) {
             throw new IllegalArgumentException("the version needs to contain major, minor, and revision, and optionally the build: " + version);
         }

         try {
-
+            final int rawMajor = Integer.parseInt(parts[0]);
+            if (rawMajor >= 5 && snapshot) { // we don't support snapshot as part of the version here anymore
+                throw new IllegalArgumentException("illegal version format - snapshots are only supported until version 2.x");
+            }
+            final int betaOffset = rawMajor < 5 ? 0 : 25;
             //we reverse the version id calculation based on some assumption as we can't reliably reverse the modulo
-            final int major = Integer.parseInt(parts[0]) * 1000000;
+            final int major = rawMajor * 1000000;
             final int minor = Integer.parseInt(parts[1]) * 10000;
             final int revision = Integer.parseInt(parts[2]) * 100;

@@ -547,11 +165,17 @@ public class Version {
             int build = 99;
             if (parts.length == 4) {
                 String buildStr = parts[3];
-                if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
-                    build = Integer.parseInt(buildStr.substring(4));
-                }
-                if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
+                if (buildStr.startsWith("alpha")) {
+                    assert rawMajor >= 5 : "major must be >= 5 but was " + major;
+                    build = Integer.parseInt(buildStr.substring(5));
+                    assert build < 25 : "expected a beta build but " + build + " >= 25";
+                } else if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
+                    build = betaOffset + Integer.parseInt(buildStr.substring(4));
+                    assert build < 50 : "expected a beta build but " + build + " >= 50";
+                } else if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
                     build = Integer.parseInt(buildStr.substring(2)) + 50;
+                } else {
+                    throw new IllegalArgumentException("unable to parse version " + version);
                 }
             }
@@ -614,13 +238,16 @@ public class Version {
     public String toString() {
         StringBuilder sb = new StringBuilder();
         sb.append(major).append('.').append(minor).append('.').append(revision);
-        if (isBeta()) {
+        if (isAlpha()) {
+            sb.append("-alpha");
+            sb.append(build);
+        } else if (isBeta()) {
             if (major >= 2) {
                 sb.append("-beta");
             } else {
                 sb.append(".Beta");
             }
-            sb.append(build);
+            sb.append(major < 5 ? build : build-25);
         } else if (build < 99) {
             if (major >= 2) {
                 sb.append("-rc");
@@ -656,7 +283,16 @@ public class Version {
     }

     public boolean isBeta() {
-        return build < 50;
+        return major < 5 ? build < 50 : build >= 25 && build < 50;
+    }
+
+    /**
+     * Returns true iff this version is an alpha version
+     * Note: This has been introduced in elasticsearch version 5. Previous versions will never
+     * have an alpha version.
+     */
+    public boolean isAlpha() {
+        return major < 5 ? false : build < 25;
     }

     public boolean isRC() {
diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java
index 491202e7c7a..be9387f9a8a 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -165,7 +165,6 @@ import org.elasticsearch.action.percolate.MultiPercolateAction;
 import org.elasticsearch.action.percolate.PercolateAction;
 import org.elasticsearch.action.percolate.TransportMultiPercolateAction;
 import org.elasticsearch.action.percolate.TransportPercolateAction;
-import org.elasticsearch.action.percolate.TransportShardMultiPercolateAction;
 import org.elasticsearch.action.search.ClearScrollAction;
 import org.elasticsearch.action.search.MultiSearchAction;
 import org.elasticsearch.action.search.SearchAction;
@@ -331,7 +330,7 @@ public class ActionModule extends AbstractModule {
         registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class);
         registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
         registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class);
-        registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class, TransportShardMultiPercolateAction.class);
+        registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class);
         registerAction(ExplainAction.INSTANCE, TransportExplainAction.class);
         registerAction(ClearScrollAction.INSTANCE, TransportClearScrollAction.class);
         registerAction(RecoveryAction.INSTANCE, TransportRecoveryAction.class);
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java
index ccae17b1eeb..946897a2c97 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java
@@ -197,9 +197,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
         numberOfPendingTasks = in.readInt();
         timedOut = in.readBoolean();
         numberOfInFlightFetch = in.readInt();
-        if (in.getVersion().onOrAfter(Version.V_1_7_0)) {
-            delayedUnassignedShards= in.readInt();
-        }
+        delayedUnassignedShards= in.readInt();
         taskMaxWaitingTime = TimeValue.readTimeValue(in);
     }

@@ -212,9 +210,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
         out.writeInt(numberOfPendingTasks);
         out.writeBoolean(timedOut);
         out.writeInt(numberOfInFlightFetch);
-        if (out.getVersion().onOrAfter(Version.V_1_7_0)) {
-            out.writeInt(delayedUnassignedShards);
-        }
+        out.writeInt(delayedUnassignedShards);
         taskMaxWaitingTime.writeTo(out);
     }

diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
index b5c9577aff7..069f0ebe1b8 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
@@ -24,7 +24,6 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateObserver;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
@@ -32,6 +31,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
@@ -213,7 +213,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
         }
         if (request.indices() != null && request.indices().length > 0) {
             try {
-                indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), request.indices());
+                indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.strictExpand(), request.indices());
                 waitForCounter++;
             } catch (IndexNotFoundException e) {
                 response.setStatus(ClusterHealthStatus.RED); // no indices, make sure its RED
@@ -280,7 +280,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
         String[] concreteIndices;
         try {
-            concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
+            concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
         } catch (IndexNotFoundException e) {
             // one of the specified indices is not there - treat it as RED.
             ClusterHealthResponse response = new ClusterHealthResponse(clusterName.value(), Strings.EMPTY_ARRAY, clusterState,
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java
index c743a1d2a91..d53f651da45 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java
@@ -24,8 +24,8 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.nodes.BaseNodeRequest;
 import org.elasticsearch.action.support.nodes.TransportNodesAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java
index 1fa64d5e7b7..87ec2d052ab 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java
@@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.http.HttpInfo;
+import org.elasticsearch.ingest.core.IngestInfo;
 import org.elasticsearch.monitor.jvm.JvmInfo;
 import org.elasticsearch.monitor.os.OsInfo;
 import org.elasticsearch.monitor.process.ProcessInfo;
@@ -74,12 +75,15 @@ public class NodeInfo extends BaseNodeResponse {
     @Nullable
     private PluginsAndModules plugins;

-    NodeInfo() {
+    @Nullable
+    private IngestInfo ingest;
+
+    public NodeInfo() {
     }

     public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map serviceAttributes, @Nullable Settings settings,
                     @Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
-                    @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins) {
+                    @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest) {
         super(node);
         this.version = version;
         this.build = build;
@@ -92,6 +96,7 @@ public class NodeInfo extends BaseNodeResponse {
         this.transport = transport;
         this.http = http;
         this.plugins = plugins;
+        this.ingest = ingest;
     }

     /**
@@ -176,6 +181,11 @@ public class NodeInfo extends BaseNodeResponse {
         return this.plugins;
     }

+    @Nullable
+    public IngestInfo getIngest() {
+        return ingest;
+    }
+
     public static NodeInfo readNodeInfo(StreamInput in) throws IOException {
         NodeInfo nodeInfo = new NodeInfo();
         nodeInfo.readFrom(in);
@@ -220,6 +230,9 @@ public class NodeInfo extends BaseNodeResponse {
             plugins = new PluginsAndModules();
             plugins.readFrom(in);
         }
+        if (in.readBoolean()) {
+            ingest = new IngestInfo(in);
+        }
     }

     @Override
@@ -285,5 +298,11 @@ public class NodeInfo extends BaseNodeResponse {
             out.writeBoolean(true);
             plugins.writeTo(out);
         }
+        if (ingest == null) {
+            out.writeBoolean(false);
+        } else {
+            out.writeBoolean(true);
+            ingest.writeTo(out);
+        }
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
index 46a36f1d8a3..66c5cfd65d4 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
@@ -38,6 +38,7 @@ public class NodesInfoRequest extends BaseNodesRequest {
     private boolean transport = true;
     private boolean http = true;
     private boolean plugins = true;
+    private boolean ingest = true;

     public NodesInfoRequest() {
     }
@@ -62,6 +63,7 @@ public class NodesInfoRequest extends BaseNodesRequest {
         transport = false;
         http = false;
         plugins = false;
+        ingest = false;
         return this;
     }

@@ -77,6 +79,7 @@ public class NodesInfoRequest extends BaseNodesRequest {
         transport = true;
         http = true;
         plugins = true;
+        ingest = true;
         return this;
     }

@@ -202,6 +205,22 @@ public class NodesInfoRequest extends BaseNodesRequest {
         return plugins;
     }

+    /**
+     * Should information about ingest be returned
+     * @param ingest true if you want info
+     */
+    public NodesInfoRequest ingest(boolean ingest) {
+        this.ingest = ingest;
+        return this;
+    }
+
+    /**
+     * @return true if information about ingest is requested
+     */
+    public boolean ingest() {
+        return ingest;
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
@@ -213,6 +232,7 @@ public class NodesInfoRequest extends BaseNodesRequest {
         transport = in.readBoolean();
         http = in.readBoolean();
         plugins = in.readBoolean();
+        ingest = in.readBoolean();
     }

     @Override
@@ -226,5 +246,6 @@ public class NodesInfoRequest extends BaseNodesRequest {
         out.writeBoolean(transport);
         out.writeBoolean(http);
         out.writeBoolean(plugins);
+        out.writeBoolean(ingest);
     }
 }
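Reviewer note (illustration, not part of the patch): NodeInfo.writeTo() above guards the new optional ingest section with an explicit presence flag, and readFrom() mirrors it, so the wire format stays self-describing while the NodesInfoRequest flags are plain booleans. A minimal sketch of that flag-then-payload pattern using plain java.io streams (a String stands in for IngestInfo; this is not the Elasticsearch StreamInput/StreamOutput API):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class OptionalFieldSketch {
    public static void main(String[] args) throws IOException {
        String ingest = null; // the optional section; may legitimately be absent

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            if (ingest == null) {
                out.writeBoolean(false);   // absent: write only the presence flag
            } else {
                out.writeBoolean(true);    // present: flag first, then the payload
                out.writeUTF(ingest);
            }
        }

        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            // The reader checks the flag before attempting to decode the payload.
            String decoded = in.readBoolean() ? in.readUTF() : null;
            System.out.println("decoded = " + decoded); // prints: decoded = null
        }
    }
}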
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java
index d73b3d47dfb..fc484012379 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java
@@ -110,4 +110,12 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder implements To
         if (nodeInfo.getPlugins() != null) {
             nodeInfo.getPlugins().toXContent(builder, params);
         }
+        if (nodeInfo.getIngest() != null) {
+            nodeInfo.getIngest().toXContent(builder, params);
+        }
         builder.endObject();
     }
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java
index 2a763910ddd..f52729faa4f 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java
@@ -23,8 +23,8 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.nodes.BaseNodeRequest;
 import org.elasticsearch.action.support.nodes.TransportNodesAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -80,7 +80,7 @@ public class TransportNodesInfoAction extends TransportNodesAction
     new DiscoveryStats(null));
-
+        ingestStats = in.readOptionalWritable(IngestStats::new);
     }

     @Override
@@ -282,6 +293,7 @@
         out.writeOptionalStreamable(breaker);
         out.writeOptionalStreamable(scriptStats);
         out.writeOptionalStreamable(discoveryStats);
+        out.writeOptionalWriteable(ingestStats);
     }

     @Override
@@ -337,6 +349,10 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
             getDiscoveryStats().toXContent(builder, params);
         }

+        if (getIngestStats() != null) {
+            getIngestStats().toXContent(builder, params);
+        }
+
         return builder;
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java
index 5916421c1ed..88162a617a8 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java
@@ -42,6 +42,7 @@ public class NodesStatsRequest extends BaseNodesRequest {
     private boolean breaker;
     private boolean script;
     private boolean discovery;
+    private boolean ingest;

     public NodesStatsRequest() {
     }
@@ -69,6 +70,7 @@ public class NodesStatsRequest extends BaseNodesRequest {
         this.breaker = true;
         this.script = true;
         this.discovery = true;
+        this.ingest = true;
         return this;
     }

@@ -87,6 +89,7 @@ public class NodesStatsRequest extends BaseNodesRequest {
         this.breaker = false;
         this.script = false;
         this.discovery = false;
+        this.ingest = false;
         return this;
     }

@@ -250,6 +253,17 @@ public class NodesStatsRequest extends BaseNodesRequest {
         return this;
     }

+    public boolean ingest() {
+        return ingest;
+    }
+
+    /**
+     * Should ingest statistics be returned.
+     */
+    public NodesStatsRequest ingest(boolean ingest) {
+        this.ingest = ingest;
+        return this;
+    }

     @Override
     public void readFrom(StreamInput in) throws IOException {
@@ -265,6 +279,7 @@ public class NodesStatsRequest extends BaseNodesRequest {
         breaker = in.readBoolean();
         script = in.readBoolean();
         discovery = in.readBoolean();
+        ingest = in.readBoolean();
     }

     @Override
@@ -281,6 +296,7 @@ public class NodesStatsRequest extends BaseNodesRequest {
         out.writeBoolean(breaker);
         out.writeBoolean(script);
         out.writeBoolean(discovery);
+        out.writeBoolean(ingest);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java
index dc35eefee7d..027e6122681 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java
@@ -137,4 +137,12 @@ public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder
         return super.match(task) && task instanceof CancellableTask;
     }

-    public CancelTasksRequest reason(String reason) {
+    /**
+     * Set the reason for canceling the task.
+     */
+    public CancelTasksRequest setReason(String reason) {
         this.reason = reason;
         return this;
     }

-    public String reason() {
+    /**
+     * The reason for canceling the task.
+     */
+    public String getReason() {
         return reason;
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java
index b07e540d792..336f4c84596 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java
@@ -26,10 +26,10 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.tasks.TransportTasksAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -84,21 +84,21 @@ public class TransportCancelTasksAction extends TransportTasksAction
     operation) {
-        if (request.taskId().isSet() == false) {
+        if (request.getTaskId().isSet() == false) {
             // we are only checking one task, we can optimize it
-            CancellableTask task = taskManager.getCancellableTask(request.taskId().getId());
+            CancellableTask task = taskManager.getCancellableTask(request.getTaskId().getId());
             if (task != null) {
                 if (request.match(task)) {
                     operation.accept(task);
                 } else {
-                    throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support this operation");
+                    throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support this operation");
                 }
             } else {
-                if (taskManager.getTask(request.taskId().getId()) != null) {
+                if (taskManager.getTask(request.getTaskId().getId()) != null) {
                     // The task exists, but doesn't support cancellation
-                    throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support cancellation");
+                    throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support cancellation");
                 } else {
-                    throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.taskId());
+                    throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.getTaskId());
                 }
             }
         } else {
@@ -113,14 +113,14 @@ public class TransportCancelTasksAction extends TransportTasksAction
     removeBanOnNodes(cancellableTask, nodes));
-        Set childNodes = taskManager.cancel(cancellableTask, request.reason(), banLock::onTaskFinished);
+        Set childNodes = taskManager.cancel(cancellableTask, request.getReason(), banLock::onTaskFinished);
         if (childNodes != null) {
             if (childNodes.isEmpty()) {
                 logger.trace("cancelling task {} with no children", cancellableTask.getId());
                 return cancellableTask.taskInfo(clusterService.localNode(), false);
             } else {
                 logger.trace("cancelling task {} with children on nodes [{}]", cancellableTask.getId(), childNodes);
-                setBanOnNodes(request.reason(), cancellableTask, childNodes, banLock);
+                setBanOnNodes(request.getReason(), cancellableTask, childNodes, banLock);
                 return cancellableTask.taskInfo(clusterService.localNode(), false);
             }
         } else {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java
index 6bf8ac3e1ef..3fe743fc36a 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java
@@ -31,31 +31,49 @@ import java.io.IOException;
 public class ListTasksRequest extends BaseTasksRequest {

     private boolean detailed = false;
+    private boolean waitForCompletion = false;

     /**
      * Should the detailed task information be returned.
      */
-    public boolean detailed() {
+    public boolean getDetailed() {
         return this.detailed;
     }

     /**
      * Should the detailed task information be returned.
      */
-    public ListTasksRequest detailed(boolean detailed) {
+    public ListTasksRequest setDetailed(boolean detailed) {
         this.detailed = detailed;
         return this;
     }

+    /**
+     * Should this request wait for all found tasks to complete?
+     */
+    public boolean getWaitForCompletion() {
+        return waitForCompletion;
+    }
+
+    /**
+     * Should this request wait for all found tasks to complete?
+     */
+    public ListTasksRequest setWaitForCompletion(boolean waitForCompletion) {
+        this.waitForCompletion = waitForCompletion;
+        return this;
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         detailed = in.readBoolean();
+        waitForCompletion = in.readBoolean();
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeBoolean(detailed);
+        out.writeBoolean(waitForCompletion);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java
index 2b462014f43..1385781125a 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java
@@ -35,7 +35,15 @@ public class ListTasksRequestBuilder extends TasksRequestBuilder {
+    private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100);
+    private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30);

     @Inject
     public TransportListTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService,
                                     TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
@@ -59,7 +67,34 @@ public class TransportListTasksAction extends TransportTasksAction
     operation) {
+        if (false == request.getWaitForCompletion()) {
+            super.processTasks(request, operation);
+            return;
+        }
+        // If we should wait for completion then we have to intercept every found task and wait for it to leave the manager.
+        TimeValue timeout = request.getTimeout();
+        if (timeout == null) {
+            timeout = DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT;
+        }
+        long timeoutTime = System.nanoTime() + timeout.nanos();
+        super.processTasks(request, operation.andThen((Task t) -> {
+            while (System.nanoTime() - timeoutTime < 0) {
+                if (taskManager.getTask(t.getId()) == null) {
+                    return;
+                }
+                try {
+                    Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis());
+                } catch (InterruptedException e) {
+                    throw new ElasticsearchException("Interrupted waiting for completion of [{}]", e, t);
+                }
+            }
+            throw new ElasticsearchTimeoutException("Timed out waiting for completion of [{}]", t);
+        }));
     }

     @Override
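Reviewer note (illustration, not part of the patch): processTasks() above busy-polls the TaskManager every 100ms (WAIT_FOR_COMPLETION_POLL) until each found task is gone, giving up after the request timeout, which defaults to 30s (DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT). A self-contained sketch of the same deadline-plus-poll idiom, with a BooleanSupplier standing in for the taskManager.getTask(...) == null check:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BooleanSupplier;

public class PollUntilDoneSketch {
    /** Polls until done reports true; throws if the deadline passes first. */
    static void awaitCompletion(BooleanSupplier done, long timeoutMillis, long pollMillis) throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
        while (System.nanoTime() - deadline < 0) { // overflow-safe nanoTime comparison, as in the hunk above
            if (done.getAsBoolean()) {
                return;
            }
            Thread.sleep(pollMillis);              // the 100ms WAIT_FOR_COMPLETION_POLL analogue
        }
        throw new IllegalStateException("timed out waiting for completion");
    }

    public static void main(String[] args) throws InterruptedException {
        AtomicBoolean finished = new AtomicBoolean(false);
        new Thread(() -> {
            try { Thread.sleep(300); } catch (InterruptedException ignored) {}
            finished.set(true);                    // simulates the task leaving the manager
        }).start();
        awaitCompletion(finished::get, 30_000, 100); // 30s default timeout, 100ms poll
        System.out.println("task finished");
    }
}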
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java
index 79e51f9a46e..a17d2aac892 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java
@@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.cluster.repositories.delete;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.repositories.RepositoriesService;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java
index 39d9cacbda3..490d20f086c 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java
@@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.cluster.repositories.get;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -30,6 +29,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
 import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java
index efc45f16cbd..d1639001352 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java
@@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.cluster.repositories.put;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.repositories.RepositoriesService;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java
index 8b1d9816004..2c75335dcaa 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java
@@ -23,11 +23,11 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.repositories.RepositoriesService;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
index d7ec84fb7a5..e6116dbfbc4 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
@@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
 import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -31,6 +30,7 @@
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
index 75f94921e61..60a0e7a8046 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
@@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
 import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -33,6 +32,7 @@ import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.inject.Inject;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java
index 9f3ccac8f64..1b329d17289 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java
@@ -67,7 +67,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {

     @Override
     public void readFrom(StreamInput in) throws IOException {
-        index = Index.readIndex(in);
+        index = new Index(in);
         shardId = in.readVInt();
         shards = new ShardRouting[in.readVInt()];
         for (int i = 0; i < shards.length; i++) {
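Reviewer note (illustration, not part of the patch): `index = new Index(in)` above swaps a static readIndex(in) factory for a constructor that consumes the stream, which lets deserialized fields be final. A minimal sketch of that read-from-stream-constructor pattern, with plain java.io types standing in for the real StreamInput/StreamOutput and Index:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

public class StreamConstructorSketch {
    // Stand-in for org.elasticsearch.index.Index; the field can be final because
    // the constructor, not a post-hoc readFrom(), populates it.
    static final class IndexStandIn {
        final String name;

        IndexStandIn(String name) { this.name = name; }

        IndexStandIn(DataInput in) throws IOException {
            this.name = in.readUTF(); // read fields in exactly the order writeTo() wrote them
        }

        void writeTo(DataOutput out) throws IOException {
            out.writeUTF(name);
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new IndexStandIn("twitter").writeTo(new DataOutputStream(bytes));
        IndexStandIn copy = new IndexStandIn(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.name); // prints: twitter
    }
}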
org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; @@ -59,7 +59,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA @Override protected ClusterBlockException checkBlock(ClusterSearchShardsRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request)); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request)); } @Override @@ -70,7 +70,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA @Override protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener listener) { ClusterState clusterState = clusterService.state(); - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(state, request.routing(), request.indices()); Set nodeIds = new HashSet<>(); GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index 457b6e69383..2654ac0c269 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.cluster.snapshots.create; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.SnapshotId; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.snapshots.SnapshotInfo; @@ -66,7 +66,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction 0) { - String[] indices = indexNameExpressionResolver.concreteIndices(currentState, request); + String[] indices = indexNameExpressionResolver.concreteIndexNames(currentState, request); for (String filteredIndex : indices) { IndexMetaData indexMetaData = 
currentState.metaData().index(filteredIndex); if (indexMetaData != null) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java index ec7017160c0..f8304bf76a9 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; -import org.elasticsearch.index.percolator.PercolateStats; +import org.elasticsearch.index.percolator.PercolatorQueryCacheStats; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.search.suggest.completion.CompletionStats; @@ -48,7 +48,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { private QueryCacheStats queryCache; private CompletionStats completion; private SegmentsStats segments; - private PercolateStats percolate; + private PercolatorQueryCacheStats percolatorCache; private ClusterStatsIndices() { } @@ -62,7 +62,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { this.queryCache = new QueryCacheStats(); this.completion = new CompletionStats(); this.segments = new SegmentsStats(); - this.percolate = new PercolateStats(); + this.percolatorCache = new PercolatorQueryCacheStats(); for (ClusterStatsNodeResponse r : nodeResponses) { for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) { @@ -85,7 +85,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { queryCache.add(shardCommonStats.queryCache); completion.add(shardCommonStats.completion); segments.add(shardCommonStats.segments); - percolate.add(shardCommonStats.percolate); + percolatorCache.add(shardCommonStats.percolatorCache); } } @@ -128,8 +128,8 @@ public class ClusterStatsIndices implements ToXContent, Streamable { return segments; } - public PercolateStats getPercolate() { - return percolate; + public PercolatorQueryCacheStats getPercolatorCache() { + return percolatorCache; } @Override @@ -142,7 +142,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { queryCache = QueryCacheStats.readQueryCacheStats(in); completion = CompletionStats.readCompletionStats(in); segments = SegmentsStats.readSegmentsStats(in); - percolate = PercolateStats.readPercolateStats(in); + percolatorCache = PercolatorQueryCacheStats.readPercolateStats(in); } @Override @@ -155,7 +155,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { queryCache.writeTo(out); completion.writeTo(out); segments.writeTo(out); - percolate.writeTo(out); + percolatorCache.writeTo(out); } public static ClusterStatsIndices readIndicesStats(StreamInput in) throws IOException { @@ -178,7 +178,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { queryCache.toXContent(builder, params); completion.toXContent(builder, params); segments.toXContent(builder, params); - percolate.toXContent(builder, params); + percolatorCache.toXContent(builder, params); return builder; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java 
b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 0c883ccb377..6d1614eb485 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -28,10 +28,10 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.health.ClusterStateHealth; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -56,7 +56,7 @@ public class TransportClusterStatsAction extends TransportNodesAction shardsStats = new ArrayList<>(); for (IndexService indexService : indicesService) { for (IndexShard indexShard : indexService) { if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) { // only report on fully started shards - shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats())); + shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats())); } } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index 105d596bad8..370b668f659 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -22,11 +22,11 @@ package org.elasticsearch.action.admin.cluster.tasks; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index d37053e056b..218b84e68ae 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -23,7 
+23,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException; @@ -90,11 +90,11 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeAction aliases = new HashSet<>(); for (AliasActions action : actions) { //expand indices - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), action.indices()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), action.indices()); //collect the aliases Collections.addAll(aliases, action.aliases()); for (String index : concreteIndices) { - for (String alias : action.concreteAliases(state.metaData(), index)) { + for (String alias : action.concreteAliases(state.metaData(), index)) { AliasAction finalAction = new AliasAction(action.aliasAction()); finalAction.index(index); finalAction.alias(alias); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java index 5f92587f138..8ca09dbb67e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java @@ -22,11 +22,11 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; @@ -50,7 +50,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadAction listener) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); boolean result = state.metaData().hasAliases(request.aliases(), concreteIndices); listener.onResponse(new AliasesExistResponse(result)); } diff --git 
a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 9c2c2f03b57..061f916c2e0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -21,12 +21,12 @@ package org.elasticsearch.action.admin.indices.alias.get; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -53,7 +53,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction listener) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); @SuppressWarnings("unchecked") ImmutableOpenMap<String, List<AliasMetaData>> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices); listener.onResponse(new GetAliasesResponse(result)); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 0541ac31505..0edae5eb1bc 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -32,11 +32,11 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index bc229d72b1b..59cd95044cc 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.cache.clear; import
org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; @@ -77,7 +77,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc @Override protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) { - IndexService service = indicesService.indexService(shardRouting.getIndexName()); + IndexService service = indicesService.indexService(shardRouting.index()); if (service != null) { IndexShard shard = service.getShardOrNull(shardRouting.id()); boolean clearedAtLeastOne = false; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index f8bbebf7db8..4fbfc7e72ab 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -23,17 +23,19 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -46,7 +48,8 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_INDICES_CLOSE_ENABLE_SETTING = + Setting.boolSetting("cluster.indices.close.enable", true, Property.Dynamic, Property.NodeScope); @Inject public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, @@ -86,12 +89,12 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction listener) { - final 
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 98a002cc2fb..7b47a46a236 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.create; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.IndexAlreadyExistsException; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 28bf46f798f..489001d9b89 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -23,18 +23,23 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + /** * Delete index action. 
*/ @@ -70,13 +75,13 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction listener) { - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); - if (concreteIndices.length == 0) { + final Set<Index> concreteIndices = new HashSet<>(Arrays.asList(indexNameExpressionResolver.concreteIndices(state, request))); + if (concreteIndices.isEmpty()) { listener.onResponse(new DeleteIndexResponse(true)); return; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java index acda370d7ff..c451e50b77c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java @@ -23,11 +23,11 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; @@ -60,7 +60,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction< protected ClusterBlockException checkBlock(IndicesExistsRequest request, ClusterState state) { //make sure through indices options that the concrete indices call never throws IndexMissingException IndicesOptions indicesOptions = IndicesOptions.fromOptions(true, true, request.indicesOptions().expandWildcardsOpen(), request.indicesOptions().expandWildcardsClosed()); - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, indicesOptions, request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, request.indices())); } @Override @@ -68,7 +68,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction< boolean exists; try { // Similar to the previous behaviour, but now also aliases and wildcards are supported.
- indexNameExpressionResolver.concreteIndices(state, request); + indexNameExpressionResolver.concreteIndexNames(state, request); exists = true; } catch (IndexNotFoundException e) { exists = false; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java index 2fd92451752..e1cf5be1aca 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java @@ -21,12 +21,12 @@ package org.elasticsearch.action.admin.indices.exists.types; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -57,12 +57,12 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadAction listener) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), request.indices()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), request.indices()); if (concreteIndices.length == 0) { listener.onResponse(new TypesExistsResponse(false)); return; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index d2a8f1abcbf..8bb124d8fc4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -23,8 +23,8 @@ import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 302bdafc471..3c22209813f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -22,12 +22,11 @@ package org.elasticsearch.action.admin.indices.flush; import 
org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.TransportReplicationAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -46,10 +45,9 @@ public class TransportShardFlushAction extends TransportReplicationAction listener) { ClusterState clusterState = clusterService.state(); - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); final AtomicInteger indexCounter = new AtomicInteger(); final AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length); final AtomicReferenceArray indexResponses = new AtomicReferenceArray<>(concreteIndices.length); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index 3d11df97dee..e886af25fbb 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -23,12 +23,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java index bff9b4e2ab6..293f5a0e677 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java @@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.indices.mapping.get; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.info.TransportClusterInfoAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -52,7 +52,7 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction im private String source; private boolean updateAllTypes = false; + private Index concreteIndex; public PutMappingRequest() { } @@ -90,6 +94,10 @@ public class PutMappingRequest extends AcknowledgedRequest im } else if (source.isEmpty()) { validationException = addValidationError("mapping source is empty", validationException); } + if (concreteIndex != null && (indices != null && indices.length > 0)) { + validationException = addValidationError("either concrete index or unresolved indices can be set, concrete index: [" + + concreteIndex + "] and indices: " + Arrays.asList(indices), validationException); + } return validationException; } @@ -102,6 +110,22 @@ public class PutMappingRequest extends AcknowledgedRequest im return this; } + /** + * Sets a concrete index for this put mapping request. + */ + public PutMappingRequest setConcreteIndex(Index index) { + Objects.requireNonNull(index, "index must not be null"); + this.concreteIndex = index; + return this; + } + + /** + * Returns a concrete index for this mapping or null if no concrete index is defined + */ + public Index getConcreteIndex() { + return concreteIndex; + } + /** * The indices the mappings will be put. */ @@ -259,6 +283,7 @@ public class PutMappingRequest extends AcknowledgedRequest im source = in.readString(); updateAllTypes = in.readBoolean(); readTimeout(in); + concreteIndex = in.readOptionalWritable(Index::new); } @Override @@ -270,5 +295,6 @@ public class PutMappingRequest extends AcknowledgedRequest im out.writeString(source); out.writeBoolean(updateAllTypes); writeTimeout(out); + out.writeOptionalWriteable(concreteIndex); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index 28f289b86c6..c21c40cf041 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; import java.util.Map; @@ -40,6 +41,11 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index b82c5d3a626..46535350154 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -22,15 +22,16 @@ package
org.elasticsearch.action.admin.indices.mapping.put; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataMappingService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -63,13 +64,19 @@ public class TransportPutMappingAction extends TransportMasterNodeAction listener) { try { - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + final Index[] concreteIndices = request.getConcreteIndex() == null ? indexNameExpressionResolver.concreteIndices(state, request) : new Index[] {request.getConcreteIndex()}; PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices).type(request.type()) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index 7ffb30b9534..50e79036694 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -23,15 +23,16 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -73,12 +74,12 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction listener) { - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); OpenIndexClusterStateUpdateRequest updateRequest = new OpenIndexClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices); diff --git 
a/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java index 8590fc210a0..01f37527374 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.recovery; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index bd879e0eaa9..34bf39daabd 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -24,8 +24,8 @@ import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 2dd41f7801d..e3155614337 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -23,12 +23,11 @@ import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.TransportReplicationAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; +import 
org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -48,10 +47,9 @@ public class TransportShardRefreshAction extends TransportReplicationAction listener) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); ImmutableOpenMap.Builder indexToSettingsBuilder = ImmutableOpenMap.builder(); - for (String concreteIndex : concreteIndices) { + for (Index concreteIndex : concreteIndices) { IndexMetaData indexMetaData = state.getMetaData().index(concreteIndex); if (indexMetaData == null) { continue; @@ -93,7 +94,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction listener) { - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest() .indices(concreteIndices) .settings(request.settings()) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 5e22bc89144..bf4dbd3359e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -38,6 +37,7 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; @@ -87,7 +87,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc protected void masterOperation(IndicesShardStoresRequest request, ClusterState state, ActionListener listener) { final RoutingTable routingTables = state.routingTable(); final RoutingNodes routingNodes = state.getRoutingNodes(); - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); final Set shardIdsToFetch = new HashSet<>(); logger.trace("using cluster state version [{}] to determine shards", state.version()); @@ -115,7 +115,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc @Override protected ClusterBlockException checkBlock(IndicesShardStoresRequest request, ClusterState state) { - return 
state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request)); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request)); } private class AsyncShardStoresInfoFetches { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index 676eec104b1..b6ee76b16ed 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -32,9 +32,10 @@ import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.percolator.PercolateStats; +import org.elasticsearch.index.percolator.PercolatorQueryCacheStats; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; @@ -101,8 +102,8 @@ public class CommonStats implements Streamable, ToXContent { case Segments: segments = new SegmentsStats(); break; - case Percolate: - percolate = new PercolateStats(); + case PercolatorCache: + percolatorCache = new PercolatorQueryCacheStats(); break; case Translog: translog = new TranslogStats(); @@ -123,7 +124,8 @@ public class CommonStats implements Streamable, ToXContent { } - public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) { + public CommonStats(IndicesQueryCache indicesQueryCache, PercolatorQueryCache percolatorQueryCache, + IndexShard indexShard, CommonStatsFlags flags) { CommonStatsFlags.Flag[] setFlags = flags.getFlags(); @@ -168,8 +170,8 @@ public class CommonStats implements Streamable, ToXContent { case Segments: segments = indexShard.segmentStats(flags.includeSegmentFileSizes()); break; - case Percolate: - percolate = indexShard.percolateStats(); + case PercolatorCache: + percolatorCache = percolatorQueryCache.getStats(indexShard.shardId()); break; case Translog: translog = indexShard.translogStats(); @@ -223,7 +225,7 @@ public class CommonStats implements Streamable, ToXContent { public FieldDataStats fieldData; @Nullable - public PercolateStats percolate; + public PercolatorQueryCacheStats percolatorCache; @Nullable public CompletionStats completion; @@ -333,13 +335,13 @@ public class CommonStats implements Streamable, ToXContent { } else { fieldData.add(stats.getFieldData()); } - if (percolate == null) { - if (stats.getPercolate() != null) { - percolate = new PercolateStats(); - percolate.add(stats.getPercolate()); + if (percolatorCache == null) { + if (stats.getPercolatorCache() != null) { + percolatorCache = new PercolatorQueryCacheStats(); + percolatorCache.add(stats.getPercolatorCache()); } } else { - percolate.add(stats.getPercolate()); + percolatorCache.add(stats.getPercolatorCache()); } if (completion == null) { if (stats.getCompletion() != null) { @@ -447,8 +449,8 @@ public class CommonStats implements Streamable, ToXContent { } @Nullable - public PercolateStats getPercolate() { - return percolate; + public 
PercolatorQueryCacheStats getPercolatorCache() { + return percolatorCache; } @Nullable @@ -489,7 +491,7 @@ public class CommonStats implements Streamable, ToXContent { /** * Utility method which computes total memory by adding - * FieldData, Percolate, Segments (memory, index writer, version map) + * FieldData, PercolatorCache, Segments (memory, index writer, version map) */ public ByteSizeValue getTotalMemory() { long size = 0; @@ -499,9 +501,6 @@ public class CommonStats implements Streamable, ToXContent { if (this.getQueryCache() != null) { size += this.getQueryCache().getMemorySizeInBytes(); } - if (this.getPercolate() != null) { - size += this.getPercolate().getMemorySizeInBytes(); - } if (this.getSegments() != null) { size += this.getSegments().getMemoryInBytes() + this.getSegments().getIndexWriterMemoryInBytes() + @@ -547,7 +546,7 @@ public class CommonStats implements Streamable, ToXContent { fieldData = FieldDataStats.readFieldDataStats(in); } if (in.readBoolean()) { - percolate = PercolateStats.readPercolateStats(in); + percolatorCache = PercolatorQueryCacheStats.readPercolateStats(in); } if (in.readBoolean()) { completion = CompletionStats.readCompletionStats(in); @@ -629,11 +628,11 @@ public class CommonStats implements Streamable, ToXContent { out.writeBoolean(true); fieldData.writeTo(out); } - if (percolate == null) { + if (percolatorCache == null) { out.writeBoolean(false); } else { out.writeBoolean(true); - percolate.writeTo(out); + percolatorCache.writeTo(out); } if (completion == null) { out.writeBoolean(false); @@ -689,8 +688,8 @@ public class CommonStats implements Streamable, ToXContent { if (fieldData != null) { fieldData.toXContent(builder, params); } - if (percolate != null) { - percolate.toXContent(builder, params); + if (percolatorCache != null) { + percolatorCache.toXContent(builder, params); } if (completion != null) { completion.toXContent(builder, params); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java index 39608c72a15..c67c35a4108 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java @@ -240,7 +240,7 @@ public class CommonStatsFlags implements Streamable, Cloneable { FieldData("fielddata"), Docs("docs"), Warmer("warmer"), - Percolate("percolate"), + PercolatorCache("percolator_cache"), Completion("completion"), Segments("segments"), Translog("translog"), diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java index 3a74d896fba..4a2d137593e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java @@ -185,12 +185,12 @@ public class IndicesStatsRequest extends BroadcastRequest { } public IndicesStatsRequest percolate(boolean percolate) { - flags.set(Flag.Percolate, percolate); + flags.set(Flag.PercolatorCache, percolate); return this; } public boolean percolate() { - return flags.isSet(Flag.Percolate); + return flags.isSet(Flag.PercolatorCache); } public IndicesStatsRequest segments(boolean segments) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java 
b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 2189973d9b7..8c12dfa9fda 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.stats; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; @@ -140,7 +140,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction< flags.fieldDataFields(request.fieldDataFields()); } if (request.percolate()) { - flags.set(CommonStatsFlags.Flag.Percolate); + flags.set(CommonStatsFlags.Flag.PercolatorCache); } if (request.segments()) { flags.set(CommonStatsFlags.Flag.Segments); @@ -163,6 +163,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction< flags.set(CommonStatsFlags.Flag.Recovery); } - return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats()); + return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats()); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index 9eab0f80e50..0763f232711 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -21,12 +21,12 @@ package org.elasticsearch.action.admin.indices.template.delete; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; diff --git 
a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index a43397e48dc..672ca1a9080 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -22,12 +22,12 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index c5fed57d013..02aad2f7ff4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -21,13 +21,13 @@ package org.elasticsearch.action.admin.indices.template.put; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java index 6b37f56ed4a..cf288e0cc6f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java @@ -23,13 +23,13 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java index f3cf2da9fdd..cdf6f585e53 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.PrimaryMissingActionException; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -35,6 +34,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java index 527adeaa3e5..403456cb903 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java @@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.upgrade.post; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 
fe02a1541a0..320f0696605 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -29,13 +29,13 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.inject.Inject; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 707bf8de57f..9d9b36ba072 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -37,16 +37,17 @@ import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.update.TransportUpdateAction; import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndexAlreadyExistsException; @@ -245,17 +246,18 @@ public class TransportBulkAction extends HandledTransportAction list = requestsByShard.get(shardId); if (list == null) { @@ -304,7 +306,7 @@ public class TransportBulkAction extends HandledTransportAction list = requestsByShard.get(shardId); if (list == null) { @@ -314,7 +316,7 @@ public class TransportBulkAction extends HandledTransportAction list = requestsByShard.get(shardId); if (list == null) { @@ -356,18 +358,19 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, final ConcreteIndices concreteIndices, final MetaData metaData) { - String concreteIndex = concreteIndices.getConcreteIndex(request.index()); + Index concreteIndex = concreteIndices.getConcreteIndex(request.index()); Exception unavailableException = null; if (concreteIndex == null) { try { @@ -397,9 +400,9 @@ public class TransportBulkAction extends HandledTransportAction indices = new HashMap<>(); + 
private final Map indices = new HashMap<>(); ConcreteIndices(ClusterState state, IndexNameExpressionResolver indexNameExpressionResolver) { this.state = state; this.indexNameExpressionResolver = indexNameExpressionResolver; } - String getConcreteIndex(String indexOrAlias) { + Index getConcreteIndex(String indexOrAlias) { return indices.get(indexOrAlias); } - String resolveIfAbsent(DocumentRequest request) { - String concreteIndex = indices.get(request.index()); + Index resolveIfAbsent(DocumentRequest request) { + Index concreteIndex = indices.get(request.index()); if (concreteIndex == null) { concreteIndex = indexNameExpressionResolver.concreteSingleIndex(state, request); indices.put(request.index(), concreteIndex); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 30f6b03a116..76402df8aa4 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -35,12 +35,12 @@ import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; @@ -74,17 +74,19 @@ public class TransportShardBulkAction extends TransportReplicationAction shardOperationOnPrimary(MetaData metaData, BulkShardRequest request) { - final IndexService indexService = indicesService.indexServiceSafe(request.index()); - final IndexShard indexShard = indexService.getShard(request.shardId().id()); + ShardId shardId = request.shardId(); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShard(shardId.getId()); long[] preVersions = new long[request.items().length]; VersionType[] preVersionTypes = new VersionType[request.items().length]; diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 3ded0ed8e83..783fab08bae 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -28,13 +28,12 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.TransportReplicationAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import 
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -60,10 +59,10 @@ public class TransportDeleteAction extends TransportReplicationAction> implements Streamable, */ protected abstract T valueOf(String value, String optionalFormat); + /** + * @param value + * The value to be converted to a String + * @param optionalFormat + * A string describing how to print the specified value. Whether + * this parameter is supported depends on the implementation. If + * optionalFormat is specified and the implementation doesn't + * support it an {@link UnsupportedOperationException} is thrown + */ + public abstract String stringValueOf(Object value, String optionalFormat); + /** * Merges the provided stats into this stats instance. */ @@ -274,6 +287,18 @@ public abstract class FieldStats> implements Streamable, return java.lang.Long.valueOf(value); } + @Override + public String stringValueOf(Object value, String optionalFormat) { + if (optionalFormat != null) { + throw new UnsupportedOperationException("custom format isn't supported"); + } + if (value instanceof Number) { + return java.lang.Long.toString(((Number) value).longValue()); + } else { + throw new IllegalArgumentException("value must be a Long: " + value); + } + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -327,6 +352,18 @@ public abstract class FieldStats> implements Streamable, return java.lang.Float.valueOf(value); } + @Override + public String stringValueOf(Object value, String optionalFormat) { + if (optionalFormat != null) { + throw new UnsupportedOperationException("custom format isn't supported"); + } + if (value instanceof Number) { + return java.lang.Float.toString(((Number) value).floatValue()); + } else { + throw new IllegalArgumentException("value must be a Float: " + value); + } + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -380,6 +417,18 @@ public abstract class FieldStats> implements Streamable, return java.lang.Double.valueOf(value); } + @Override + public String stringValueOf(Object value, String optionalFormat) { + if (optionalFormat != null) { + throw new UnsupportedOperationException("custom format isn't supported"); + } + if (value instanceof Number) { + return java.lang.Double.toString(((Number) value).doubleValue()); + } else { + throw new IllegalArgumentException("value must be a Double: " + value); + } + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -437,6 +486,18 @@ public abstract class FieldStats> implements Streamable, return new BytesRef(value); } + @Override + public String stringValueOf(Object value, String optionalFormat) { + if (optionalFormat != null) { + throw new UnsupportedOperationException("custom format isn't supported"); + } + if (value instanceof BytesRef) { + return ((BytesRef) value).utf8ToString(); + } else { + throw new IllegalArgumentException("value must be a BytesRef: " + value); + } + } + @Override protected void toInnerXContent(XContentBuilder builder) throws IOException { builder.field(Fields.MIN_VALUE, getMinValueAsString()); @@ -490,6 +551,25 @@ public abstract class FieldStats> implements Streamable, return 
dateFormatter.parser().parseMillis(value); } + @Override + public String stringValueOf(Object value, String optionalFormat) { + FormatDateTimeFormatter dateFormatter = this.dateFormatter; + if (optionalFormat != null) { + dateFormatter = Joda.forPattern(optionalFormat); + } + long millis; + if (value instanceof java.lang.Long) { + millis = ((java.lang.Long) value).longValue(); + } else if (value instanceof DateTime) { + millis = ((DateTime) value).getMillis(); + } else if (value instanceof BytesRef) { + millis = dateFormatter.parser().parseMillis(((BytesRef) value).utf8ToString()); + } else { + throw new IllegalArgumentException("value must be either a DateTime or a long: " + value); + } + return dateFormatter.printer().print(millis); + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -504,6 +584,28 @@ public abstract class FieldStats> implements Streamable, } + public static class Ip extends Long { + + public Ip(int maxDoc, int docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long maxValue) { + super(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, minValue, maxValue); + } + + protected Ip(int type, long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long maxValue) { + super(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, minValue, maxValue); + } + + public Ip() { + } + + @Override + public String stringValueOf(Object value, String optionalFormat) { + if (value instanceof BytesRef) { + return super.stringValueOf(IpFieldMapper.ipToLong(((BytesRef) value).utf8ToString()), optionalFormat); + } + return super.stringValueOf(value, optionalFormat); + } + } + public static FieldStats read(StreamInput in) throws IOException { FieldStats stats; byte type = in.readByte(); diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java b/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java index 42360c5e0eb..de56a0f5c2e 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java @@ -28,13 +28,13 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; diff --git a/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index 2d6bafc9623..b84493c4dca 100644 --- a/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -22,12 +22,12 @@ package 
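Of the stringValueOf implementations above, only the Date variant honors optionalFormat, treating it as a Joda date pattern; the numeric and text variants throw UnsupportedOperationException when a format is supplied, and the new Ip stats reuse the Long logic after converting a BytesRef-encoded address to its numeric form. A standalone sketch of the date path, assuming joda-time (which backs the Joda.forPattern helper used in the hunk); the fallback pattern below is illustrative, not the mapper's real default:

import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

static String printDateValue(long millis, String optionalFormat) {
    // use the caller's pattern when given, otherwise an illustrative default
    String pattern = optionalFormat != null ? optionalFormat : "yyyy-MM-dd'T'HH:mm:ss.SSSZ";
    DateTimeFormatter formatter = DateTimeFormat.forPattern(pattern).withZoneUTC();
    return formatter.print(millis); // printDateValue(0L, "yyyy-MM-dd") -> "1970-01-01"
}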
org.elasticsearch.action.get; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.Preference; import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; diff --git a/core/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java b/core/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java index 7f5de65c614..1858ac8ba71 100644 --- a/core/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java +++ b/core/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java @@ -22,10 +22,10 @@ package org.elasticsearch.action.get; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -69,7 +69,7 @@ public class TransportMultiGetAction extends HandledTransportAction implements Do return this.versionType; } - private Version getVersion(MetaData metaData, String concreteIndex) { - // this can go away in 3.0 but is here now for easy backporting - since in 2.x we need the version on the timestamp stuff - final IndexMetaData indexMetaData = metaData.getIndices().get(concreteIndex); - if (indexMetaData == null) { - throw new IndexNotFoundException(concreteIndex); - } - return Version.indexCreated(indexMetaData.getSettings()); - } public void process(MetaData metaData, @Nullable MappingMetaData mappingMd, boolean allowIdGeneration, String concreteIndex) { // resolve the routing if needed @@ -600,8 +593,7 @@ public class IndexRequest extends ReplicationRequest implements Do // resolve timestamp if provided externally if (timestamp != null) { timestamp = MappingMetaData.Timestamp.parseStringTimestamp(timestamp, - mappingMd != null ? mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER, - getVersion(metaData, concreteIndex)); + mappingMd != null ? 
mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER); } if (mappingMd != null) { // might as well check for routing here @@ -645,7 +637,7 @@ public class IndexRequest extends ReplicationRequest implements Do // assigned again because mappingMd and // mappingMd#timestamp() are not null assert mappingMd != null; - timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter(), getVersion(metaData, concreteIndex)); + timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter()); } } } diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index fdd018c51f2..9be8e4cef89 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.TransportReplicationAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -36,6 +35,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -69,6 +69,7 @@ public class TransportIndexAction extends TransportReplicationAction shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Exception { // validate, if routing is required, that we got routing - IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex()); + IndexMetaData indexMetaData = metaData.getIndexSafe(request.shardId().getIndex()); MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type()); if (mappingMd != null && mappingMd.routing().required()) { if (request.routing() == null) { @@ -205,8 +207,7 @@ public class TransportIndexAction extends TransportReplicationAction actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener); if (bulkRequest.requests().isEmpty()) { diff --git a/core/src/main/java/org/elasticsearch/action/ingest/IngestProxyActionFilter.java b/core/src/main/java/org/elasticsearch/action/ingest/IngestProxyActionFilter.java index 39a4b1fa4e8..62716c6dc0d 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/IngestProxyActionFilter.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/IngestProxyActionFilter.java @@ -29,9 +29,9 @@ import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilterChain; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; 
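The import churn running through these files is one mechanical change: ClusterService moved from the org.elasticsearch.cluster package into org.elasticsearch.cluster.service. Consumers keep the same class name and injection pattern and change only the import, as the next added line shows:

// before the move
import org.elasticsearch.cluster.ClusterService;

// after the move
import org.elasticsearch.cluster.service.ClusterService;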
+import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; diff --git a/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index 31a911207ab..e1a34413e2c 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -20,31 +20,43 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.ingest.PipelineStore; +import org.elasticsearch.ingest.core.IngestInfo; import org.elasticsearch.node.service.NodeService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.HashMap; +import java.util.Map; + public class PutPipelineTransportAction extends TransportMasterNodeAction { private final PipelineStore pipelineStore; private final ClusterService clusterService; + private final TransportNodesInfoAction nodesInfoAction; @Inject public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) { + IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService, + TransportNodesInfoAction nodesInfoAction) { super(settings, PutPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new); this.clusterService = clusterService; + this.nodesInfoAction = nodesInfoAction; this.pipelineStore = nodeService.getIngestService().getPipelineStore(); } @@ -60,7 +72,28 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction listener) throws Exception { - pipelineStore.put(clusterService, request, listener); + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + nodesInfoRequest.clear(); + nodesInfoRequest.ingest(true); + nodesInfoAction.execute(nodesInfoRequest, new ActionListener() { + @Override + public void onResponse(NodesInfoResponse nodeInfos) { + try { + Map ingestInfos = new HashMap<>(); + for (NodeInfo nodeInfo : nodeInfos) { + ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest()); + } + pipelineStore.put(clusterService, ingestInfos, request, listener); + } catch (Exception e) { + onFailure(e); + } + 
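The rewritten masterOperation above no longer writes the pipeline straight to the cluster state: it first fans out a trimmed NodesInfoRequest and hands the per-node IngestInfo map to the pipeline store, which can then validate the pipeline against each node's ingest capabilities before accepting it. The trimming, taken from the hunk with explanatory comments added:

import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;

NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
nodesInfoRequest.clear();      // drop the default info sections (os, jvm, ...)
nodesInfoRequest.ingest(true); // request only each node's ingest (processor) info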
} + + @Override + public void onFailure(Throwable e) { + listener.onFailure(e); + } + }); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java b/core/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java index 74537379d1d..ac49fed763a 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java @@ -163,11 +163,7 @@ public class MultiPercolateRequest extends ActionRequest @Override public List subRequests() { - List indicesRequests = new ArrayList<>(); - for (PercolateRequest percolateRequest : this.requests) { - indicesRequests.addAll(percolateRequest.subRequests()); - } - return indicesRequests; + return requests; } private void parsePercolateAction(XContentParser parser, PercolateRequest percolateRequest, boolean allowExplicitIndex) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequest.java b/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequest.java index e69da6bf519..c9887cba03f 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequest.java @@ -19,10 +19,12 @@ package org.elasticsearch.action.percolate; import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.client.Requests; import org.elasticsearch.common.bytes.BytesArray; @@ -43,49 +45,37 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** * A request to execute a percolate operation. */ -public class PercolateRequest extends BroadcastRequest implements CompositeIndicesRequest { +public class PercolateRequest extends ActionRequest implements IndicesRequest.Replaceable { + protected String[] indices; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); private String documentType; private String routing; private String preference; - private GetRequest getRequest; private boolean onlyCount; + private GetRequest getRequest; private BytesReference source; - private BytesReference docSource; - - // Used internally in order to compute tookInMillis, TransportBroadcastAction itself doesn't allow - // to hold it temporarily in an easy way - long startTime; - - /** - * Constructor only for internal usage. - */ - public PercolateRequest() { + public String[] indices() { + return indices; } - PercolateRequest(PercolateRequest request, BytesReference docSource) { - this.indices = request.indices(); - this.documentType = request.documentType(); - this.routing = request.routing(); - this.preference = request.preference(); - this.source = request.source; - this.docSource = docSource; - this.onlyCount = request.onlyCount; - this.startTime = request.startTime; + public final PercolateRequest indices(String... 
indices) { + this.indices = indices; + return this; } - @Override - public List subRequests() { - List requests = new ArrayList<>(); - requests.add(this); - if (getRequest != null) { - requests.add(getRequest); - } - return requests; + public IndicesOptions indicesOptions() { + return indicesOptions; } + public PercolateRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + /** * Getter for {@link #documentType(String)} */ @@ -244,13 +234,9 @@ public class PercolateRequest extends BroadcastRequest impleme return this; } - BytesReference docSource() { - return docSource; - } - @Override public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = super.validate(); + ActionRequestValidationException validationException = null; if (documentType == null) { validationException = addValidationError("type is missing", validationException); } @@ -266,12 +252,12 @@ public class PercolateRequest extends BroadcastRequest impleme @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - startTime = in.readVLong(); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); documentType = in.readString(); routing = in.readOptionalString(); preference = in.readOptionalString(); source = in.readBytesReference(); - docSource = in.readBytesReference(); if (in.readBoolean()) { getRequest = new GetRequest(); getRequest.readFrom(in); @@ -282,12 +268,12 @@ public class PercolateRequest extends BroadcastRequest impleme @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeVLong(startTime); + out.writeStringArrayNullable(indices); + indicesOptions.writeIndicesOptions(out); out.writeString(documentType); out.writeOptionalString(routing); out.writeOptionalString(preference); out.writeBytesReference(source); - out.writeBytesReference(docSource); if (getRequest != null) { out.writeBoolean(true); getRequest.writeTo(out); diff --git a/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java index 472938cfbf1..83757dab089 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java @@ -18,7 +18,9 @@ */ package org.elasticsearch.action.percolate; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Strings; @@ -36,7 +38,7 @@ import java.util.Map; /** * A builder for easily defining a percolate request. 
*/ -public class PercolateRequestBuilder extends BroadcastOperationRequestBuilder { +public class PercolateRequestBuilder extends ActionRequestBuilder { private PercolateSourceBuilder sourceBuilder; @@ -44,6 +46,16 @@ public class PercolateRequestBuilder extends BroadcastOperationRequestBuilder, ToXContent { public static final Match[] EMPTY = new Match[0]; + // PercolatorQuery emits this score if no 'query' is defined in the percolate request + public final static float NO_SCORE = 0.0f; private long tookInMillis; private Match[] matches; @@ -65,15 +66,6 @@ public class PercolateResponse extends BroadcastResponse implements Iterable shardFailures, long tookInMillis, Match[] matches) { - super(totalShards, successfulShards, failedShards, shardFailures); - if (tookInMillis < 0) { - throw new IllegalArgumentException("tookInMillis must be positive but was: " + tookInMillis); - } - this.tookInMillis = tookInMillis; - this.matches = matches; - } - PercolateResponse() { } @@ -136,10 +128,10 @@ public class PercolateResponse extends BroadcastResponse implements Iterable ids; - private Map> hls; - private boolean onlyCount; - private int requestedSize; - - private InternalAggregations aggregations; - private List pipelineAggregators; - - PercolateShardResponse() { - } - - public PercolateShardResponse(TopDocs topDocs, Map ids, Map> hls, PercolateContext context) { - super(context.indexShard().shardId()); - this.topDocs = topDocs; - this.ids = ids; - this.hls = hls; - this.onlyCount = context.isOnlyCount(); - this.requestedSize = context.size(); - QuerySearchResult result = context.queryResult(); - if (result != null) { - if (result.aggregations() != null) { - this.aggregations = (InternalAggregations) result.aggregations(); - } - this.pipelineAggregators = result.pipelineAggregators(); - } - } - - public TopDocs topDocs() { - return topDocs; - } - - /** - * Returns per match the percolator query id. The key is the Lucene docId of the matching percolator query. - */ - public Map ids() { - return ids; - } - - public int requestedSize() { - return requestedSize; - } - - /** - * Returns per match the highlight snippets. The key is the Lucene docId of the matching percolator query. 
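Since PercolateRequest is no longer a BroadcastRequest, the hunks above give it its own indices array and IndicesOptions, and its readFrom/writeTo now serialize those two fields first, in matching order. A fragment showing the strict default the request starts with, using the IndicesOptions API from the diff:

import org.elasticsearch.action.support.IndicesOptions;

// expand wildcards to open indices, fail on missing indices, forbid closed ones
IndicesOptions options = IndicesOptions.strictExpandOpenAndForbidClosed();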
- */ - public Map> hls() { - return hls; - } - - public InternalAggregations aggregations() { - return aggregations; - } - - public List pipelineAggregators() { - return pipelineAggregators; - } - - public boolean onlyCount() { - return onlyCount; - } - - public boolean isEmpty() { - return topDocs.totalHits == 0; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - onlyCount = in.readBoolean(); - requestedSize = in.readVInt(); - topDocs = Lucene.readTopDocs(in); - int size = in.readVInt(); - ids = new HashMap<>(size); - for (int i = 0; i < size; i++) { - ids.put(in.readVInt(), in.readString()); - } - size = in.readVInt(); - hls = new HashMap<>(size); - for (int i = 0; i < size; i++) { - int docId = in.readVInt(); - int mSize = in.readVInt(); - Map fields = new HashMap<>(); - for (int j = 0; j < mSize; j++) { - fields.put(in.readString(), HighlightField.readHighlightField(in)); - } - hls.put(docId, fields); - } - aggregations = InternalAggregations.readOptionalAggregations(in); - if (in.readBoolean()) { - int pipelineAggregatorsSize = in.readVInt(); - List pipelineAggregators = new ArrayList<>(pipelineAggregatorsSize); - for (int i = 0; i < pipelineAggregatorsSize; i++) { - BytesReference type = in.readBytesReference(); - PipelineAggregator pipelineAggregator = PipelineAggregatorStreams.stream(type).readResult(in); - pipelineAggregators.add((SiblingPipelineAggregator) pipelineAggregator); - } - this.pipelineAggregators = pipelineAggregators; - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(onlyCount); - out.writeVLong(requestedSize); - Lucene.writeTopDocs(out, topDocs); - out.writeVInt(ids.size()); - for (Map.Entry entry : ids.entrySet()) { - out.writeVInt(entry.getKey()); - out.writeString(entry.getValue()); - } - out.writeVInt(hls.size()); - for (Map.Entry> entry1 : hls.entrySet()) { - out.writeVInt(entry1.getKey()); - out.writeVInt(entry1.getValue().size()); - for (Map.Entry entry2 : entry1.getValue().entrySet()) { - out.writeString(entry2.getKey()); - entry2.getValue().writeTo(out); - } - } - out.writeOptionalStreamable(aggregations); - if (pipelineAggregators == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeVInt(pipelineAggregators.size()); - for (PipelineAggregator pipelineAggregator : pipelineAggregators) { - out.writeBytesReference(pipelineAggregator.type().stream()); - pipelineAggregator.writeTo(out); - } - } - } -} diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java index d86d91c654e..bf0d79d884e 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java @@ -19,114 +19,91 @@ package org.elasticsearch.action.percolate; -import com.carrotsearch.hppc.IntArrayList; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetItemResponse; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.get.MultiGetResponse; -import org.elasticsearch.action.get.TransportMultiGetAction; +import 
org.elasticsearch.action.search.MultiSearchRequest; +import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.engine.DocumentMissingException; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.percolator.PercolatorService; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.search.aggregations.AggregatorParsers; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; -/** - */ public class TransportMultiPercolateAction extends HandledTransportAction { - private final ClusterService clusterService; - private final PercolatorService percolatorService; - - private final TransportMultiGetAction multiGetAction; - private final TransportShardMultiPercolateAction shardMultiPercolateAction; + private final Client client; + private final ParseFieldMatcher parseFieldMatcher; + private final IndicesQueriesRegistry queryRegistry; + private final AggregatorParsers aggParsers; @Inject - public TransportMultiPercolateAction(Settings settings, ThreadPool threadPool, TransportShardMultiPercolateAction shardMultiPercolateAction, - ClusterService clusterService, TransportService transportService, PercolatorService percolatorService, - TransportMultiGetAction multiGetAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + public TransportMultiPercolateAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Client client, IndicesQueriesRegistry queryRegistry, + AggregatorParsers aggParsers) { super(settings, MultiPercolateAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MultiPercolateRequest::new); - this.shardMultiPercolateAction = shardMultiPercolateAction; - this.clusterService = clusterService; - this.percolatorService = percolatorService; - this.multiGetAction = multiGetAction; + this.client = client; + this.aggParsers = aggParsers; + this.parseFieldMatcher = new ParseFieldMatcher(settings); + this.queryRegistry = queryRegistry; } @Override - protected void doExecute(final 
MultiPercolateRequest request, final ActionListener listener) { - final ClusterState clusterState = clusterService.state(); - clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); - - final List percolateRequests = new ArrayList<>(request.requests().size()); - // Can have a mixture of percolate requests. (normal percolate requests & percolate existing doc), - // so we need to keep track for what percolate request we had a get request - final IntArrayList getRequestSlots = new IntArrayList(); - List existingDocsRequests = new ArrayList<>(); - for (int slot = 0; slot < request.requests().size(); slot++) { - PercolateRequest percolateRequest = request.requests().get(slot); - percolateRequest.startTime = System.currentTimeMillis(); - percolateRequests.add(percolateRequest); - if (percolateRequest.getRequest() != null) { - existingDocsRequests.add(percolateRequest.getRequest()); - getRequestSlots.add(slot); + protected void doExecute(MultiPercolateRequest request, ActionListener listener) { + List> getRequests = new ArrayList<>(); + for (int i = 0; i < request.requests().size(); i++) { + GetRequest getRequest = request.requests().get(i).getRequest(); + if (getRequest != null) { + getRequests.add(new Tuple<>(i, getRequest)); } } - - if (!existingDocsRequests.isEmpty()) { - final MultiGetRequest multiGetRequest = new MultiGetRequest(); - for (GetRequest getRequest : existingDocsRequests) { - multiGetRequest.add( - new MultiGetRequest.Item(getRequest.index(), getRequest.type(), getRequest.id()) - .routing(getRequest.routing()) - ); + if (getRequests.isEmpty()) { + innerDoExecute(request, listener, Collections.emptyMap(), new HashMap<>()); + } else { + MultiGetRequest multiGetRequest = new MultiGetRequest(); + for (Tuple tuple : getRequests) { + GetRequest getRequest = tuple.v2(); + multiGetRequest.add(new MultiGetRequest.Item(getRequest.index(), getRequest.type(), getRequest.id())); } - - multiGetAction.execute(multiGetRequest, new ActionListener() { - + client.multiGet(multiGetRequest, new ActionListener() { @Override - public void onResponse(MultiGetResponse multiGetItemResponses) { - for (int i = 0; i < multiGetItemResponses.getResponses().length; i++) { - MultiGetItemResponse itemResponse = multiGetItemResponses.getResponses()[i]; - int slot = getRequestSlots.get(i); - if (!itemResponse.isFailed()) { - GetResponse getResponse = itemResponse.getResponse(); - if (getResponse.isExists()) { - PercolateRequest originalRequest = (PercolateRequest) percolateRequests.get(slot); - percolateRequests.set(slot, new PercolateRequest(originalRequest, getResponse.getSourceAsBytesRef())); - } else { - logger.trace("mpercolate existing doc, item[{}] doesn't exist", slot); - percolateRequests.set(slot, new DocumentMissingException(null, getResponse.getType(), getResponse.getId())); - } + public void onResponse(MultiGetResponse response) { + Map getResponseSources = new HashMap<>(response.getResponses().length); + Map preFailures = new HashMap<>(); + for (int i = 0; i < response.getResponses().length; i++) { + MultiGetItemResponse itemResponse = response.getResponses()[i]; + int originalSlot = getRequests.get(i).v1(); + if (itemResponse.isFailed()) { + preFailures.put(originalSlot, new MultiPercolateResponse.Item(itemResponse.getFailure().getFailure())); } else { - logger.trace("mpercolate existing doc, item[{}] failure {}", slot, itemResponse.getFailure()); - percolateRequests.set(slot, itemResponse.getFailure()); + if (itemResponse.getResponse().isExists()) { + 
getResponseSources.put(originalSlot, itemResponse.getResponse().getSourceAsBytesRef()); + } else { + GetRequest getRequest = getRequests.get(i).v2(); + preFailures.put(originalSlot, new MultiPercolateResponse.Item(new ResourceNotFoundException("percolate document [{}/{}/{}] doesn't exist", getRequest.index(), getRequest.type(), getRequest.id()))); + } } } - new ASyncAction(request, percolateRequests, listener, clusterState).run(); + innerDoExecute(request, listener, getResponseSources, preFailures); } @Override @@ -134,200 +111,81 @@ public class TransportMultiPercolateAction extends HandledTransportAction finalListener; - final Map requestsByShard; - final MultiPercolateRequest multiPercolateRequest; - final List percolateRequests; - - final Map shardToSlots; - final AtomicInteger expectedOperations; - final AtomicArray reducedResponses; - final AtomicReferenceArray expectedOperationsPerItem; - final AtomicReferenceArray responsesByItemAndShard; - - ASyncAction(MultiPercolateRequest multiPercolateRequest, List percolateRequests, ActionListener finalListener, ClusterState clusterState) { - this.finalListener = finalListener; - this.multiPercolateRequest = multiPercolateRequest; - this.percolateRequests = percolateRequests; - responsesByItemAndShard = new AtomicReferenceArray<>(percolateRequests.size()); - expectedOperationsPerItem = new AtomicReferenceArray<>(percolateRequests.size()); - reducedResponses = new AtomicArray<>(percolateRequests.size()); - - // Resolving concrete indices and routing and grouping the requests by shard - requestsByShard = new HashMap<>(); - // Keep track what slots belong to what shard, in case a request to a shard fails on all copies - shardToSlots = new HashMap<>(); - int expectedResults = 0; - for (int slot = 0; slot < percolateRequests.size(); slot++) { - Object element = percolateRequests.get(slot); - assert element != null; - if (element instanceof PercolateRequest) { - PercolateRequest percolateRequest = (PercolateRequest) element; - String[] concreteIndices; - try { - concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, percolateRequest); - } catch (IndexNotFoundException e) { - reducedResponses.set(slot, e); - responsesByItemAndShard.set(slot, new AtomicReferenceArray(0)); - expectedOperationsPerItem.set(slot, new AtomicInteger(0)); - continue; - } - Map> routing = indexNameExpressionResolver.resolveSearchRouting(clusterState, percolateRequest.routing(), percolateRequest.indices()); - // TODO: I only need shardIds, ShardIterator(ShardRouting) is only needed in TransportShardMultiPercolateAction - GroupShardsIterator shards = clusterService.operationRouting().searchShards( - clusterState, concreteIndices, routing, percolateRequest.preference() - ); - if (shards.size() == 0) { - reducedResponses.set(slot, new UnavailableShardsException(null, "No shards available")); - responsesByItemAndShard.set(slot, new AtomicReferenceArray(0)); - expectedOperationsPerItem.set(slot, new AtomicInteger(0)); - continue; - } - - // The shard id is used as index in the atomic ref array, so we need to find out how many shards there are regardless of routing: - int numShards = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, null); - responsesByItemAndShard.set(slot, new AtomicReferenceArray(numShards)); - expectedOperationsPerItem.set(slot, new AtomicInteger(shards.size())); - for (ShardIterator shard : shards) { - ShardId shardId = shard.shardId(); - TransportShardMultiPercolateAction.Request requests = 
requestsByShard.get(shardId); - if (requests == null) { - requestsByShard.put(shardId, requests = new TransportShardMultiPercolateAction.Request(shardId.getIndexName(), shardId.getId(), percolateRequest.preference())); - } - logger.trace("Adding shard[{}] percolate request for item[{}]", shardId, slot); - requests.add(new TransportShardMultiPercolateAction.Request.Item(slot, new PercolateShardRequest(shardId, percolateRequest))); - - IntArrayList items = shardToSlots.get(shardId); - if (items == null) { - shardToSlots.put(shardId, items = new IntArrayList()); - } - items.add(slot); - } - expectedResults++; - } else if (element instanceof Throwable || element instanceof MultiGetResponse.Failure) { - logger.trace("item[{}] won't be executed, reason: {}", slot, element); - reducedResponses.set(slot, element); - responsesByItemAndShard.set(slot, new AtomicReferenceArray(0)); - expectedOperationsPerItem.set(slot, new AtomicInteger(0)); - } - } - expectedOperations = new AtomicInteger(expectedResults); - } - - void run() { - if (expectedOperations.get() == 0) { - finish(); - return; - } - - logger.trace("mpercolate executing for shards {}", requestsByShard.keySet()); - for (Map.Entry entry : requestsByShard.entrySet()) { - final ShardId shardId = entry.getKey(); - TransportShardMultiPercolateAction.Request shardRequest = entry.getValue(); - shardMultiPercolateAction.execute(shardRequest, new ActionListener() { - + private void innerDoExecute(MultiPercolateRequest request, ActionListener listener, Map getResponseSources, Map preFailures) { + try { + MultiSearchRequest multiSearchRequest = createMultiSearchRequest(request, getResponseSources, preFailures); + if (multiSearchRequest.requests().isEmpty()) { + // we may have failed to turn all percolate requests into search requests, + // in that case just return the response... 
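From here on, multi percolate rides entirely on multi search: get-backed documents are fetched first, items that already failed are parked in preFailures under their original slot, the surviving items become one MultiSearchRequest, and the responses are stitched back by position. A compact sketch of that slot bookkeeping, with hypothetical names and String standing in for both response and failure items:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

static List<String> stitch(int itemCount, Map<Integer, String> preFailures, List<String> batchResponses) {
    // batchResponses holds one entry per item that was not pre-failed, in original order
    List<String> out = new ArrayList<>(itemCount);
    int batchIndex = 0;
    for (int slot = 0; slot < itemCount; slot++) {
        if (preFailures.containsKey(slot)) {
            out.add(preFailures.get(slot)); // keep the early failure in its original slot
        } else {
            out.add(batchResponses.get(batchIndex++));
        }
    }
    return out;
}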
+ listener.onResponse( + createMultiPercolateResponse(new MultiSearchResponse(new MultiSearchResponse.Item[0]), request, preFailures) + ); + } else { + client.multiSearch(multiSearchRequest, new ActionListener() { @Override - public void onResponse(TransportShardMultiPercolateAction.Response response) { - onShardResponse(shardId, response); + public void onResponse(MultiSearchResponse response) { + try { + listener.onResponse(createMultiPercolateResponse(response, request, preFailures)); + } catch (Exception e) { + onFailure(e); + } } @Override public void onFailure(Throwable e) { - onShardFailure(shardId, e); + listener.onFailure(e); } - }); } + } catch (Exception e) { + listener.onFailure(e); } + } - @SuppressWarnings("unchecked") - void onShardResponse(ShardId shardId, TransportShardMultiPercolateAction.Response response) { - logger.trace("{} Percolate shard response", shardId); + private MultiSearchRequest createMultiSearchRequest(MultiPercolateRequest multiPercolateRequest, Map getResponseSources, Map preFailures) throws IOException { + MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); + multiSearchRequest.indicesOptions(multiPercolateRequest.indicesOptions()); + + for (int i = 0; i < multiPercolateRequest.requests().size(); i++) { + if (preFailures.keySet().contains(i)) { + continue; + } + + PercolateRequest percolateRequest = multiPercolateRequest.requests().get(i); + BytesReference docSource = getResponseSources.get(i); try { - for (TransportShardMultiPercolateAction.Response.Item item : response.items()) { - AtomicReferenceArray shardResults = responsesByItemAndShard.get(item.slot()); - if (shardResults == null) { - assert false : "shardResults can't be null"; - continue; - } - - if (item.failed()) { - shardResults.set(shardId.id(), new BroadcastShardOperationFailedException(shardId, item.error())); - } else { - shardResults.set(shardId.id(), item.response()); - } - - assert expectedOperationsPerItem.get(item.slot()).get() >= 1 : "slot[" + item.slot() + "] can't be lower than one"; - if (expectedOperationsPerItem.get(item.slot()).decrementAndGet() == 0) { - // Failure won't bubble up, since we fail the whole request now via the catch clause below, - // so expectedOperationsPerItem will not be decremented twice. - reduce(item.slot()); - } - } - } catch (Throwable e) { - logger.error("{} Percolate original reduce error", e, shardId); - finalListener.onFailure(e); + SearchRequest searchRequest = TransportPercolateAction.createSearchRequest( + percolateRequest, docSource, queryRegistry, aggParsers, parseFieldMatcher + ); + multiSearchRequest.add(searchRequest); + } catch (Exception e) { + preFailures.put(i, new MultiPercolateResponse.Item(e)); } } - @SuppressWarnings("unchecked") - void onShardFailure(ShardId shardId, Throwable e) { - logger.debug("{} Shard multi percolate failure", e, shardId); - try { - IntArrayList slots = shardToSlots.get(shardId); - for (int i = 0; i < slots.size(); i++) { - int slot = slots.get(i); - AtomicReferenceArray shardResults = responsesByItemAndShard.get(slot); - if (shardResults == null) { - continue; - } + return multiSearchRequest; + } - shardResults.set(shardId.id(), new BroadcastShardOperationFailedException(shardId, e)); - assert expectedOperationsPerItem.get(slot).get() >= 1 : "slot[" + slot + "] can't be lower than one. 
Caused by: " + e.getMessage(); - if (expectedOperationsPerItem.get(slot).decrementAndGet() == 0) { - reduce(slot); - } - } - } catch (Throwable t) { - logger.error("{} Percolate original reduce error, original error {}", t, shardId, e); - finalListener.onFailure(t); - } - } - - void reduce(int slot) { - AtomicReferenceArray shardResponses = responsesByItemAndShard.get(slot); - PercolateResponse reducedResponse = TransportPercolateAction.reduce((PercolateRequest) percolateRequests.get(slot), shardResponses, percolatorService); - reducedResponses.set(slot, reducedResponse); - assert expectedOperations.get() >= 1 : "slot[" + slot + "] expected options should be >= 1 but is " + expectedOperations.get(); - if (expectedOperations.decrementAndGet() == 0) { - finish(); - } - } - - void finish() { - MultiPercolateResponse.Item[] finalResponse = new MultiPercolateResponse.Item[reducedResponses.length()]; - for (int slot = 0; slot < reducedResponses.length(); slot++) { - Object element = reducedResponses.get(slot); - assert element != null : "Element[" + slot + "] shouldn't be null"; - if (element instanceof PercolateResponse) { - finalResponse[slot] = new MultiPercolateResponse.Item((PercolateResponse) element); - } else if (element instanceof Throwable) { - finalResponse[slot] = new MultiPercolateResponse.Item((Throwable)element); - } else if (element instanceof MultiGetResponse.Failure) { - finalResponse[slot] = new MultiPercolateResponse.Item(((MultiGetResponse.Failure)element).getFailure()); + private MultiPercolateResponse createMultiPercolateResponse(MultiSearchResponse multiSearchResponse, MultiPercolateRequest request, Map preFailures) { + int searchResponseIndex = 0; + MultiPercolateResponse.Item[] percolateItems = new MultiPercolateResponse.Item[request.requests().size()]; + for (int i = 0; i < percolateItems.length; i++) { + if (preFailures.keySet().contains(i)) { + percolateItems[i] = preFailures.get(i); + } else { + MultiSearchResponse.Item searchItem = multiSearchResponse.getResponses()[searchResponseIndex++]; + if (searchItem.isFailure()) { + percolateItems[i] = new MultiPercolateResponse.Item(searchItem.getFailure()); + } else { + PercolateRequest percolateRequest = request.requests().get(i); + percolateItems[i] = new MultiPercolateResponse.Item(TransportPercolateAction.createPercolateResponse(searchItem.getResponse(), percolateRequest.onlyCount())); } } - finalListener.onResponse(new MultiPercolateResponse(finalResponse)); } - + return new MultiPercolateResponse(percolateItems); } } diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java index 2a8f1a4ed24..b23ef04021e 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java @@ -18,71 +18,74 @@ */ package org.elasticsearch.action.percolate; -import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.get.TransportGetAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ActionFilters; -import 
org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.engine.DocumentMissingException; -import org.elasticsearch.percolator.PercolateException; -import org.elasticsearch.percolator.PercolatorService; -import org.elasticsearch.tasks.Task; +import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.PercolatorQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.TemplateQueryParser; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.rest.action.support.RestActions; +import org.elasticsearch.script.Template; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.aggregations.AggregatorParsers; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.Arrays; -/** - * - */ -public class TransportPercolateAction extends TransportBroadcastAction { +public class TransportPercolateAction extends HandledTransportAction { - private final PercolatorService percolatorService; - private final TransportGetAction getAction; + private final Client client; + private final ParseFieldMatcher parseFieldMatcher; + private final IndicesQueriesRegistry queryRegistry; + private final AggregatorParsers aggParsers; @Inject - public TransportPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, PercolatorService percolatorService, - TransportGetAction getAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, PercolateAction.NAME, threadPool, clusterService, transportService, actionFilters, - indexNameExpressionResolver, PercolateRequest::new, PercolateShardRequest::new, 
ThreadPool.Names.PERCOLATE); - this.percolatorService = percolatorService; - this.getAction = getAction; + public TransportPercolateAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + Client client, IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers) { + super(settings, PercolateAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, PercolateRequest::new); + this.client = client; + this.aggParsers = aggParsers; + this.parseFieldMatcher = new ParseFieldMatcher(settings); + this.queryRegistry = indicesQueriesRegistry; } @Override - protected void doExecute(Task task, final PercolateRequest request, final ActionListener listener) { - request.startTime = System.currentTimeMillis(); + protected void doExecute(PercolateRequest request, ActionListener listener) { if (request.getRequest() != null) { - getAction.execute(request.getRequest(), new ActionListener() { + client.get(request.getRequest(), new ActionListener() { @Override public void onResponse(GetResponse getResponse) { - if (!getResponse.isExists()) { - onFailure(new DocumentMissingException(null, request.getRequest().type(), request.getRequest().id())); - return; + if (getResponse.isExists()) { + innerDoExecute(request, getResponse.getSourceAsBytesRef(), listener); + } else { + onFailure(new ResourceNotFoundException("percolate document [{}/{}/{}] doesn't exist", request.getRequest().index(), request.getRequest().type(), request.getRequest().id())); } - - BytesReference docSource = getResponse.getSourceAsBytesRef(); - TransportPercolateAction.super.doExecute(task, new PercolateRequest(request, docSource), listener); } @Override @@ -91,99 +94,153 @@ public class TransportPercolateAction extends TransportBroadcastAction shardResults = null; - List shardFailures = null; - - boolean onlyCount = false; - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // simply ignore non active shards - } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == null) { - shardFailures = new ArrayList<>(); - } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); - } else { - PercolateShardResponse percolateShardResponse = (PercolateShardResponse) shardResponse; - successfulShards++; - if (!percolateShardResponse.isEmpty()) { - if (shardResults == null) { - onlyCount = percolateShardResponse.onlyCount(); - shardResults = new ArrayList<>(); - } - shardResults.add(percolateShardResponse); - } - } - } - - if (shardResults == null) { - long tookInMillis = Math.max(1, System.currentTimeMillis() - request.startTime); - PercolateResponse.Match[] matches = request.onlyCount() ? 
+ public static SearchRequest createSearchRequest(PercolateRequest percolateRequest, BytesReference documentSource, IndicesQueriesRegistry queryRegistry, AggregatorParsers aggParsers, ParseFieldMatcher parseFieldMatcher) throws IOException { + SearchRequest searchRequest = new SearchRequest(); + if (percolateRequest.indices() != null) { + searchRequest.indices(percolateRequest.indices()); + } + searchRequest.indicesOptions(percolateRequest.indicesOptions()); + searchRequest.routing(percolateRequest.routing()); + searchRequest.preference(percolateRequest.preference()); + + BytesReference querySource = null; + XContentBuilder searchSource = XContentFactory.jsonBuilder().startObject(); + if (percolateRequest.source() != null && percolateRequest.source().length() > 0) { + try (XContentParser parser = XContentHelper.createParser(percolateRequest.source())) { + String currentFieldName = null; + XContentParser.Token token = parser.nextToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("Unknown token [" + token+ "]"); + } + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == 
XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if ("doc".equals(currentFieldName)) { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.copyCurrentStructure(parser); + builder.flush(); + documentSource = builder.bytes(); + } else if ("query".equals(currentFieldName) || "filter".equals(currentFieldName)) { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.copyCurrentStructure(parser); + builder.flush(); + querySource = builder.bytes(); + } else if ("sort".equals(currentFieldName)) { + searchSource.field("sort"); + searchSource.copyCurrentStructure(parser); + } else if ("aggregations".equals(currentFieldName)) { + searchSource.field("aggregations"); + searchSource.copyCurrentStructure(parser); + } else if ("highlight".equals(currentFieldName)) { + searchSource.field("highlight"); + searchSource.copyCurrentStructure(parser); + } else { + throw new IllegalArgumentException("Unknown field [" + currentFieldName+ "]"); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if ("sort".equals(currentFieldName)) { + searchSource.field("sort"); + searchSource.copyCurrentStructure(parser); + } else { + throw new IllegalArgumentException("Unknown field [" + currentFieldName+ "]"); + } + } else if (token.isValue()) { + if ("size".equals(currentFieldName)) { + searchSource.field("size", parser.intValue()); + } else if ("sort".equals(currentFieldName)) { + searchSource.field("sort", parser.text()); + } else if ("track_scores".equals(currentFieldName) || "trackScores".equals(currentFieldName)) { + searchSource.field("track_scores", parser.booleanValue()); + } else { + throw new IllegalArgumentException("Unknown field [" + currentFieldName+ "]"); + } + } else { + throw new IllegalArgumentException("Unknown token [" + token + "]"); + } + } + } + } + + if (percolateRequest.onlyCount()) { + searchSource.field("size", 0); + } + + PercolatorQueryBuilder percolatorQueryBuilder = new PercolatorQueryBuilder(percolateRequest.documentType(), documentSource); + if (querySource != null) { + QueryParseContext queryParseContext = new QueryParseContext(queryRegistry); + queryParseContext.reset(XContentHelper.createParser(querySource)); + queryParseContext.parseFieldMatcher(parseFieldMatcher); + QueryBuilder queryBuilder = queryParseContext.parseInnerQueryBuilder(); + BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery(); + boolQueryBuilder.must(queryBuilder); + boolQueryBuilder.filter(percolatorQueryBuilder); + searchSource.field("query", boolQueryBuilder); + } else { + searchSource.field("query", percolatorQueryBuilder); + } + + searchSource.endObject(); + searchSource.flush(); + BytesReference source = searchSource.bytes(); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + QueryParseContext context = new QueryParseContext(queryRegistry); + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(source)) { + context.reset(parser); + context.parseFieldMatcher(parseFieldMatcher); + searchSourceBuilder.parseXContent(parser, context, aggParsers, null); + searchRequest.source(searchSourceBuilder); + return searchRequest; + } + } + + public static PercolateResponse createPercolateResponse(SearchResponse searchResponse, boolean onlyCount) { + SearchHits hits = searchResponse.getHits(); + PercolateResponse.Match[] matches; + if (onlyCount) { + matches = null; + } else { + matches = new PercolateResponse.Match[hits.getHits().length]; 
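+ // each hit corresponds to a matching percolator query: keep its index, id, score and any highlighting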
+ for (int i = 0; i < hits.getHits().length; i++) { + SearchHit hit = hits.getHits()[i]; + matches[i] = new PercolateResponse.Match(new Text(hit.getIndex()), new Text(hit.getId()), hit.getScore(), hit.getHighlightFields()); + } + } + + return new PercolateResponse( + searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), searchResponse.getFailedShards(), + Arrays.asList(searchResponse.getShardFailures()), matches, hits.getTotalHits(), searchResponse.getTookInMillis(), (InternalAggregations) searchResponse.getAggregations() + ); } } diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java deleted file mode 100644 index 0732d4d4066..00000000000 --- a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.percolate; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.action.support.single.shard.SingleShardRequest; -import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.percolator.PercolatorService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -/** - */ -public class TransportShardMultiPercolateAction extends TransportSingleShardAction<TransportShardMultiPercolateAction.Request, TransportShardMultiPercolateAction.Response> { - - private final PercolatorService percolatorService; - - private static final String ACTION_NAME = MultiPercolateAction.NAME + "[shard]"; - - @Inject - public TransportShardMultiPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, PercolatorService percolatorService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, - Request::new, ThreadPool.Names.PERCOLATE); - this.percolatorService = percolatorService; - } - - @Override - protected boolean isSubAction() { - return true; - } - - @Override - protected Response newResponse() { - return new Response(); - } - - @Override - protected boolean resolveIndex(Request request) { - return false; - } - - @Override - protected ShardIterator shards(ClusterState state, InternalRequest request) { - return clusterService.operationRouting().getShards( - state, request.concreteIndex(), request.request().shardId(), request.request().preference - ); - } - - @Override - protected Response shardOperation(Request request, ShardId shardId) { - // TODO: Look into combining the shard req's docs into one in memory index. - Response response = new Response(); - response.items = new ArrayList<>(request.items.size()); - for (Request.Item item : request.items) { - Response.Item responseItem; - int slot = item.slot; - try { - responseItem = new Response.Item(slot, percolatorService.percolate(item.request)); - } catch (Throwable t) { - if (TransportActions.isShardNotAvailableException(t)) { - throw (ElasticsearchException) t; - } else { - logger.debug("{} failed to multi percolate", t, request.shardId()); - responseItem = new Response.Item(slot, t); - } - } - response.items.add(responseItem); - } - return response; - } - - - public static class Request extends SingleShardRequest<Request> implements IndicesRequest { - - private int shardId; - private String preference; - private List<Item> items; - - public Request() { - } - - Request(String concreteIndex, int shardId, String preference) { - super(concreteIndex); - this.shardId = shardId; - this.preference = preference; - this.items = new ArrayList<>(); - } - - @Override - public ActionRequestValidationException validate() { - return super.validateNonNullIndex(); - } - - @Override - public String[] indices() { - List<String> indices = new ArrayList<>(); - for (Item item : items) { - Collections.addAll(indices, item.request.indices()); - } - return indices.toArray(new String[indices.size()]); - } - - public int shardId() { - return shardId; - } - - public void add(Item item) { - items.add(item); - } - - public List<Item> items() { - return items; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardId = in.readVInt(); - preference = in.readOptionalString(); - int size = in.readVInt(); - items = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - int slot = in.readVInt(); - PercolateShardRequest shardRequest = new PercolateShardRequest(); - shardRequest.readFrom(in); - Item item = new Item(slot, shardRequest); - items.add(item); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVInt(shardId); - out.writeOptionalString(preference); - out.writeVInt(items.size()); - for (Item item : items) { - out.writeVInt(item.slot); - item.request.writeTo(out); - } - } - - static class Item { - - private final int slot; - private final PercolateShardRequest request; - - public Item(int slot, PercolateShardRequest request) { - this.slot = slot; - this.request = request; - } - - public int slot() { - return slot; - } - - public PercolateShardRequest request() { - return request; - } - - } - - } - - public static class Response extends ActionResponse { - - private List<Item> items; - - public List<Item> items() { - return items; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVInt(items.size()); - for (Item item : items) { - out.writeVInt(item.slot); - if (item.response != null) { - out.writeBoolean(true); - item.response.writeTo(out); - } else { - out.writeBoolean(false); - out.writeThrowable(item.error); - } - } - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - items = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - int slot = in.readVInt(); - if (in.readBoolean()) { - PercolateShardResponse shardResponse = new PercolateShardResponse(); - shardResponse.readFrom(in); - items.add(new Item(slot, shardResponse)); - } else { - items.add(new Item(slot, in.readThrowable())); - } - } - } - - public static class Item { - - private final int slot; - private final PercolateShardResponse response; - private final Throwable error; - - public Item(Integer slot, PercolateShardResponse response) { - this.slot = slot; - this.response = response; - this.error = null; - } - - public Item(Integer slot, Throwable error) { - this.slot = slot; - this.error = error; - this.response = null; - } - - public int slot() { - return slot; - } - - public PercolateShardResponse response() { - return response; - } - - public Throwable error() { - return error; - } - - public boolean failed() { - return error != null; - } - } - - } - -} diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 830a54778e1..732e9098ee7 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -25,7 +25,6 @@ import org.apache.lucene.search.TopDocs; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -34,6 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -96,7 +96,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name // date math expressions and $now in scripts. 
This way all apis will deal with now in the same way instead // of just for the _search api - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request.indicesOptions(), + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request.indicesOptions(), startTime(), request.indices()); for (String index : concreteIndices) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java index 56d0fedd40c..f7cb72b22e9 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java @@ -21,9 +21,9 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.action.SearchTransportService; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index f2dcefa7554..c5f320f1b33 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -23,9 +23,9 @@ import com.carrotsearch.hppc.IntArrayList; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java index dcbf9b5091f..1b338847762 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java @@ -21,9 +21,9 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.controller.SearchPhaseController; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java 
b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index e15b9da8acb..3feb40411f9 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -23,9 +23,9 @@ import com.carrotsearch.hppc.IntArrayList; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index ffca87de22c..931df24a256 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -393,9 +393,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices()); int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap); diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java index 03009cc0102..3bcadda1725 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java @@ -22,8 +22,8 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.action.SearchTransportService; diff --git a/core/src/main/java/org/elasticsearch/action/suggest/ShardSuggestRequest.java b/core/src/main/java/org/elasticsearch/action/suggest/ShardSuggestRequest.java index 80facf74878..ad15bd27440 100644 --- a/core/src/main/java/org/elasticsearch/action/suggest/ShardSuggestRequest.java +++ b/core/src/main/java/org/elasticsearch/action/suggest/ShardSuggestRequest.java @@ -20,10 +20,10 @@ package org.elasticsearch.action.suggest; import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.suggest.SuggestBuilder; import java.io.IOException; @@ -32,29 +32,29 @@ import java.io.IOException; */ public final class ShardSuggestRequest extends BroadcastShardRequest { - private BytesReference suggestSource; + private 
SuggestBuilder suggest; public ShardSuggestRequest() { } ShardSuggestRequest(ShardId shardId, SuggestRequest request) { super(shardId, request); - this.suggestSource = request.suggest(); + this.suggest = request.suggest(); } - public BytesReference suggest() { - return suggestSource; + public SuggestBuilder suggest() { + return suggest; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - suggestSource = in.readBytesReference(); + suggest = SuggestBuilder.PROTOTYPE.readFrom(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeBytesReference(suggestSource); + suggest.writeTo(out); } } diff --git a/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java b/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java index 0d1c4932d48..1398dd1dcf1 100644 --- a/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java +++ b/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java @@ -21,27 +21,25 @@ package org.elasticsearch.action.suggest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.broadcast.BroadcastRequest; -import org.elasticsearch.client.Requests; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.suggest.SuggestBuilder; import java.io.IOException; import java.util.Arrays; +import java.util.Objects; /** * A request to get suggestions for corrections of phrases. Best created with * {@link org.elasticsearch.client.Requests#suggestRequest(String...)}. *

- * The request requires the suggest query source to be set either using - * {@link #suggest(org.elasticsearch.common.bytes.BytesReference)} / {@link #suggest(org.elasticsearch.common.bytes.BytesReference)} - * or by using {@link #suggest(org.elasticsearch.search.suggest.SuggestBuilder)} - * (Best created using the {link @org.elasticsearch.search.suggest.SuggestBuilders)}). + * The request requires the suggest query source to be set using + * {@link #suggest(org.elasticsearch.search.suggest.SuggestBuilder)} * * @see SuggestResponse * @see org.elasticsearch.client.Client#suggest(SuggestRequest) @@ -56,7 +54,7 @@ public final class SuggestRequest extends BroadcastRequest { @Nullable private String preference; - private BytesReference suggestSource; + private SuggestBuilder suggest; public SuggestRequest() { } @@ -76,40 +74,21 @@ public final class SuggestRequest extends BroadcastRequest { } /** - * The Phrase to get correction suggestions for + * The suggestion query to get correction suggestions for */ - public BytesReference suggest() { - return suggestSource; + public SuggestBuilder suggest() { + return suggest; } - + /** - * set a new source for the suggest query + * set a new source for the suggest query */ - public SuggestRequest suggest(BytesReference suggestSource) { - this.suggestSource = suggestSource; + public SuggestRequest suggest(SuggestBuilder suggest) { + Objects.requireNonNull(suggest, "suggest must not be null"); + this.suggest = suggest; return this; } - /** - * set a new source using a {@link org.elasticsearch.search.suggest.SuggestBuilder} - * for phrase and term suggestion lookup - */ - public SuggestRequest suggest(SuggestBuilder suggestBuilder) { - return suggest(suggestBuilder.buildAsBytes(Requests.CONTENT_TYPE)); - } - - /** - * set a new source using a {@link org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder} - * for completion suggestion lookup - */ - public SuggestRequest suggest(SuggestBuilder.SuggestionBuilder suggestionBuilder) { - return suggest(suggestionBuilder.buildAsBytes(Requests.CONTENT_TYPE)); - } - - public SuggestRequest suggest(String source) { - return suggest(new BytesArray(source)); - } - /** * A comma separated list of routing values to control the shards the search will be executed on. 
*/ @@ -147,25 +126,29 @@ public final class SuggestRequest extends BroadcastRequest { super.readFrom(in); routing = in.readOptionalString(); preference = in.readOptionalString(); - suggest(in.readBytesReference()); + suggest = SuggestBuilder.PROTOTYPE.readFrom(in); } @Override public void writeTo(StreamOutput out) throws IOException { + Objects.requireNonNull(suggest, "suggest must not be null"); super.writeTo(out); out.writeOptionalString(routing); out.writeOptionalString(preference); - out.writeBytesReference(suggestSource); + suggest.writeTo(out); } @Override public String toString() { + Objects.requireNonNull(suggest, "suggest must not be null"); String sSource = "_na_"; try { - sSource = XContentHelper.convertToJson(suggestSource, false); + XContentBuilder builder = JsonXContent.contentBuilder(); + builder = suggest.toXContent(builder, ToXContent.EMPTY_PARAMS); + sSource = builder.string(); } catch (Exception e) { // ignore } - return "[" + Arrays.toString(indices) + "]" + ", suggestSource[" + sSource + "]"; + return "[" + Arrays.toString(indices) + "]" + ", suggest[" + sSource + "]"; } } diff --git a/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java index 06a2b00c648..b64745bf400 100644 --- a/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java @@ -19,17 +19,10 @@ package org.elasticsearch.action.suggest; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.search.suggest.SuggestBuilder; -import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder; - -import java.io.IOException; +import org.elasticsearch.search.suggest.SuggestionBuilder; /** * A suggest action request builder. 
@@ -44,9 +37,11 @@ public class SuggestRequestBuilder extends BroadcastOperationRequestBuilder<SuggestRequest, SuggestResponse, SuggestRequestBuilder> { - public <T> SuggestRequestBuilder addSuggestion(SuggestionBuilder<T> suggestion) { - suggest.addSuggestion(suggestion); + public SuggestRequestBuilder addSuggestion(String name, SuggestionBuilder<?> suggestion) { + suggest.addSuggestion(name, suggestion); return this; } @@ -59,7 +54,7 @@ public class SuggestRequestBuilder extends BroadcastOperationRequestBuilder { +public class TransportSuggestAction + extends TransportBroadcastAction<SuggestRequest, SuggestResponse, ShardSuggestRequest, ShardSuggestResponse> { private final IndicesService indicesService; private final SuggestPhase suggestPhase; @Inject - public TransportSuggestAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, - IndicesService indicesService, SuggestPhase suggestPhase, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver) { + public TransportSuggestAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, + TransportService transportService, IndicesService indicesService, SuggestPhase suggestPhase, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, SuggestAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, SuggestRequest::new, ShardSuggestRequest::new, ThreadPool.Names.SUGGEST); this.indicesService = indicesService; @@ -85,7 +84,8 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequest, SuggestResponse, ShardSuggestRequest, ShardSuggestResponse> { - Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices()); + Map<String, Set<String>> routingMap = + indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices()); return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); } @@ -124,7 +124,8 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequest, SuggestResponse, ShardSuggestRequest, ShardSuggestResponse> { - BytesReference suggest = request.suggest(); - if (suggest != null && suggest.length() > 0) { - parser = XContentFactory.xContent(suggest).createParser(suggest); - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new IllegalArgumentException("suggest content missing"); - } - final SuggestionSearchContext context = suggestPhase.parseElement().parseInternal(parser, indexService.mapperService(), - indexService.fieldData(), request.shardId().getIndexName(), request.shardId().id()); + SuggestBuilder suggest = request.suggest(); + if (suggest != null) { + final SuggestionSearchContext context = suggest.build(indexService.newQueryShardContext()); final Suggest result = suggestPhase.execute(context, searcher.searcher()); return new ShardSuggestResponse(request.shardId(), result); } @@ -151,9 +146,6 @@ public class TransportSuggestAction extends TransportBroadcastAction AUTO_CREATE_INDEX_SETTING = new Setting<>("action.auto_create_index", "true", AutoCreate::new, false, Setting.Scope.CLUSTER);
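+ // Setting.Property flags (NodeScope, Dynamic, ...) replace the old boolean "dynamic" argument and Setting.Scope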
+ public static final Setting<AutoCreate> AUTO_CREATE_INDEX_SETTING = + new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope); private final boolean dynamicMappingDisabled; private final IndexNameExpressionResolver resolver; diff --git a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java index 5f2fb33e043..31fc1d06175 100644 --- a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java +++ b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -33,7 +34,8 @@ public final class DestructiveOperations extends AbstractComponent { /** * Setting which controls whether wildcard usage (*, prefix*, _all) is allowed. */ - public static final Setting<Boolean> REQUIRES_NAME_SETTING = Setting.boolSetting("action.destructive_requires_name", false, true, Setting.Scope.CLUSTER); + public static final Setting<Boolean> REQUIRES_NAME_SETTING = + Setting.boolSetting("action.destructive_requires_name", false, Property.Dynamic, Property.NodeScope); private volatile boolean destructiveRequiresName; @Inject diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index e39fb0288ac..182d922fc39 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -33,6 +32,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; @@ -125,7 +125,7 @@ public abstract class TransportBroadcastAction, Response extends ActionResponse> extends TransportMasterNodeAction<Request, Response> { - public static final Setting<Boolean> FORCE_LOCAL_SETTING = Setting.boolSetting("action.master.force_local", false, false, Setting.Scope.CLUSTER); + public static final Setting<Boolean> FORCE_LOCAL_SETTING = + Setting.boolSetting("action.master.force_local", false, Property.NodeScope); private final boolean forceLocal; diff --git a/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java b/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java index 7e42036c1d1..66b9fce5d71 100644 --- a/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java @@ -22,9 +22,9 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import 
org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -50,7 +50,7 @@ public abstract class TransportClusterInfoAction listener) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); doMasterOperation(request, concreteIndices, state, listener); } diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 7e2702afd8a..9c021efbe40 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -26,11 +26,11 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ChildTaskRequest; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java index a15819e82c7..25de821e227 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.support.replication; import com.carrotsearch.hppc.cursors.IntObjectCursor; - import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ReplicationResponse; @@ -32,11 +31,11 @@ import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.shard.ShardId; @@ -97,7 +96,7 @@ public abstract class TransportBroadcastReplicationAction shards(Request request, ClusterState clusterState) { List shardIds = new ArrayList<>(); - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request); + String[] concreteIndices = 
indexNameExpressionResolver.concreteIndexNames(clusterState, request); for (String index : concreteIndices) { IndexMetaData indexMetaData = clusterState.metaData().getIndices().get(index); if (indexMetaData != null) { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 7fc18266816..1ddddbf8888 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -28,10 +28,8 @@ import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; -import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -43,6 +41,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; @@ -53,6 +52,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.shard.IndexShard; @@ -102,7 +102,6 @@ public abstract class TransportReplicationAction request, Supplier replicaRequest, String executor) { super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager()); @@ -120,7 +119,6 @@ public abstract class TransportReplicationAction() { diff --git a/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index 2d13b7f99ac..0c7f0627c66 100644 --- a/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -34,6 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import 
org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.settings.Settings; @@ -141,7 +141,7 @@ public abstract class TransportSingleShardAction> extends * Sets the list of action masks for the actions that should be returned */ @SuppressWarnings("unchecked") - public final Request actions(String... actions) { + public final Request setActions(String... actions) { this.actions = actions; return (Request) this; } @@ -79,16 +79,16 @@ public class BaseTasksRequest> extends /** * Return the list of action masks for the actions that should be returned */ - public String[] actions() { + public String[] getActions() { return actions; } - public final String[] nodesIds() { + public final String[] getNodesIds() { return nodesIds; } @SuppressWarnings("unchecked") - public final Request nodesIds(String... nodesIds) { + public final Request setNodesIds(String... nodesIds) { this.nodesIds = nodesIds; return (Request) this; } @@ -98,12 +98,12 @@ public class BaseTasksRequest> extends * * By default tasks with any ids are returned. */ - public TaskId taskId() { + public TaskId getTaskId() { return taskId; } @SuppressWarnings("unchecked") - public final Request taskId(TaskId taskId) { + public final Request setTaskId(TaskId taskId) { this.taskId = taskId; return (Request) this; } @@ -112,29 +112,29 @@ public class BaseTasksRequest> extends /** * Returns the parent task id that tasks should be filtered by */ - public TaskId parentTaskId() { + public TaskId getParentTaskId() { return parentTaskId; } @SuppressWarnings("unchecked") - public Request parentTaskId(TaskId parentTaskId) { + public Request setParentTaskId(TaskId parentTaskId) { this.parentTaskId = parentTaskId; return (Request) this; } - public TimeValue timeout() { + public TimeValue getTimeout() { return this.timeout; } @SuppressWarnings("unchecked") - public final Request timeout(TimeValue timeout) { + public final Request setTimeout(TimeValue timeout) { this.timeout = timeout; return (Request) this; } @SuppressWarnings("unchecked") - public final Request timeout(String timeout) { + public final Request setTimeout(String timeout) { this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout"); return (Request) this; } @@ -162,11 +162,11 @@ public class BaseTasksRequest> extends } public boolean match(Task task) { - if (actions() != null && actions().length > 0 && Regex.simpleMatch(actions(), task.getAction()) == false) { + if (getActions() != null && getActions().length > 0 && Regex.simpleMatch(getActions(), task.getAction()) == false) { return false; } - if (taskId().isSet() == false) { - if(taskId().getId() != task.getId()) { + if (getTaskId().isSet() == false) { + if(getTaskId().getId() != task.getId()) { return false; } } diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java index a7265ce9998..a510a847c62 100644 --- a/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java @@ -35,19 +35,19 @@ public class TasksRequestBuilder , Res @SuppressWarnings("unchecked") public final 
RequestBuilder setNodesIds(String... nodesIds) { - request.nodesIds(nodesIds); + request.setNodesIds(nodesIds); return (RequestBuilder) this; } @SuppressWarnings("unchecked") public final RequestBuilder setActions(String... actions) { - request.actions(actions); + request.setActions(actions); return (RequestBuilder) this; } @SuppressWarnings("unchecked") public final RequestBuilder setTimeout(TimeValue timeout) { - request.timeout(timeout); + request.setTimeout(timeout); return (RequestBuilder) this; } } diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index f10b9f23327..a14c6e00e14 100644 --- a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -28,11 +28,11 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ChildTaskRequest; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -124,25 +124,25 @@ public abstract class TransportTasksAction< } protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) { - if (request.taskId().isSet()) { - return clusterState.nodes().resolveNodesIds(request.nodesIds()); + if (request.getTaskId().isSet()) { + return clusterState.nodes().resolveNodesIds(request.getNodesIds()); } else { - return new String[]{request.taskId().getNodeId()}; + return new String[]{request.getTaskId().getNodeId()}; } } protected void processTasks(TasksRequest request, Consumer operation) { - if (request.taskId().isSet() == false) { + if (request.getTaskId().isSet() == false) { // we are only checking one task, we can optimize it - Task task = taskManager.getTask(request.taskId().getId()); + Task task = taskManager.getTask(request.getTaskId().getId()); if (task != null) { if (request.match(task)) { operation.accept((OperationTask) task); } else { - throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.taskId()); + throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.getTaskId()); } } else { - throw new ResourceNotFoundException("task [{}] is missing", request.taskId()); + throw new ResourceNotFoundException("task [{}] is missing", request.getTaskId()); } } else { for (Task task : taskManager.getTasks().values()) { @@ -224,8 +224,8 @@ public abstract class TransportTasksAction< } } else { TransportRequestOptions.Builder builder = TransportRequestOptions.builder(); - if (request.timeout() != null) { - builder.withTimeout(request.timeout()); + if (request.getTimeout() != null) { + builder.withTimeout(request.getTimeout()); } builder.withCompress(transportCompress()); for (int i = 0; i < nodesIds.length; i++) { diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java 
b/core/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java index 7047ee69040..d71958cefde 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java @@ -20,13 +20,12 @@ package org.elasticsearch.action.termvectors; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -72,7 +71,7 @@ public class TransportMultiTermVectorsAction extends HandledTransportAction listener, final int retryCount) { - final IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex()); - final IndexShard indexShard = indexService.getShard(request.shardId()); + final ShardId shardId = request.getShardId(); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShard(shardId.getId()); final UpdateHelper.Result result = updateHelper.prepare(request, indexShard); switch (result.operation()) { case UPSERT: @@ -194,7 +196,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio if (e instanceof VersionConflictEngineException) { if (retryCount < request.retryOnConflict()) { logger.trace("Retry attempt [{}] of [{}] on version conflict on [{}][{}][{}]", - retryCount + 1, request.retryOnConflict(), request.index(), request.shardId(), request.id()); + retryCount + 1, request.retryOnConflict(), request.index(), request.getShardId(), request.id()); threadPool.executor(executor()).execute(new ActionRunnable(listener) { @Override protected void doRun() { @@ -267,9 +269,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio break; case NONE: UpdateResponse update = result.action(); - IndexService indexServiceOrNull = indicesService.indexService(request.concreteIndex()); + IndexService indexServiceOrNull = indicesService.indexService(shardId.getIndex()); if (indexServiceOrNull != null) { - IndexShard shard = indexService.getShardOrNull(request.shardId()); + IndexShard shard = indexService.getShardOrNull(shardId.getId()); if (shard != null) { shard.noopUpdate(request.type()); } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 6bc69ed4d9c..0877ea1c66b 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptParameterParser; import 
org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; @@ -43,6 +44,7 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService.ScriptType; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -88,7 +90,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest> } public UpdateRequest(String index, String type, String id) { - this.index = index; + super(index); this.type = type; this.id = id; } @@ -195,7 +197,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest> return parent; } - int shardId() { + public ShardId getShardId() { return this.shardId; } @@ -670,9 +672,15 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest> } else if ("detect_noop".equals(currentFieldName)) { detectNoop(parser.booleanValue()); } else if ("fields".equals(currentFieldName)) { - List values = parser.list(); - String[] fields = values.toArray(new String[values.size()]); - fields(fields); + List fields = null; + if (token == XContentParser.Token.START_ARRAY) { + fields = (List) parser.list(); + } else if (token.isValue()) { + fields = Collections.singletonList(parser.text()); + } + if (fields != null) { + fields(fields.toArray(new String[fields.size()])); + } } else { //here we don't have settings available, unable to throw deprecation exceptions scriptParameterParser.token(currentFieldName, token, parser, ParseFieldMatcher.EMPTY); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 215659054d2..2a8984e59d4 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -24,10 +24,10 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.StringHelper; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.common.PidFile; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.Terminal; import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.LogConfigurator; @@ -45,10 +45,9 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.file.Path; import java.util.Locale; +import java.util.Map; import java.util.concurrent.CountDownLatch; -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; - /** * Internal startup code. */ @@ -189,9 +188,13 @@ final class Bootstrap { node = new Node(nodeSettings); } - private static Environment initialSettings(boolean foreground) { + private static Environment initialSettings(boolean foreground, String pidFile) { Terminal terminal = foreground ? Terminal.DEFAULT : null; - return InternalSettingsPreparer.prepareEnvironment(EMPTY_SETTINGS, terminal); + Settings.Builder builder = Settings.builder(); + if (Strings.hasLength(pidFile)) { + builder.put(Environment.PIDFILE_SETTING.getKey(), pidFile); + } + return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal); } private void start() { @@ -218,22 +221,18 @@ final class Bootstrap { /** * This method is invoked by {@link Elasticsearch#main(String[])} * to startup elasticsearch. */ - static void init(String[] args) throws Throwable {
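+ // command-line flags are parsed before init is invoked; only the resulting foreground/pidfile/settings values are passed in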
*/ - static void init(String[] args) throws Throwable { + static void init( + final boolean foreground, + final String pidFile, + final Map esSettings) throws Throwable { // Set the system property before anything has a chance to trigger its use initLoggerPrefix(); - BootstrapCLIParser bootstrapCLIParser = new BootstrapCLIParser(); - CliTool.ExitStatus status = bootstrapCLIParser.execute(args); - - if (CliTool.ExitStatus.OK != status) { - exit(status.status()); - } + elasticsearchSettings(esSettings); INSTANCE = new Bootstrap(); - boolean foreground = !"false".equals(System.getProperty("es.foreground", System.getProperty("es-foreground"))); - - Environment environment = initialSettings(foreground); + Environment environment = initialSettings(foreground, pidFile); Settings settings = environment.settings(); LogConfigurator.configure(settings, true); checkForCustomConfFile(); @@ -297,6 +296,13 @@ final class Bootstrap { } } + @SuppressForbidden(reason = "Sets system properties passed as CLI parameters") + private static void elasticsearchSettings(Map esSettings) { + for (Map.Entry esSetting : esSettings.entrySet()) { + System.setProperty(esSetting.getKey(), esSetting.getValue()); + } + } + @SuppressForbidden(reason = "System#out") private static void closeSystOut() { System.out.close(); @@ -307,14 +313,6 @@ final class Bootstrap { System.err.close(); } - @SuppressForbidden(reason = "System#err") - private static void sysError(String line, boolean flush) { - System.err.println(line); - if (flush) { - System.err.flush(); - } - } - private static void checkForCustomConfFile() { String confFileSetting = System.getProperty("es.default.config"); checkUnsetAndMaybeExit(confFileSetting, "es.default.config"); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCLIParser.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCLIParser.java deleted file mode 100644 index ca67fc91132..00000000000 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCLIParser.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.bootstrap; - -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.Option; -import org.elasticsearch.Build; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolConfig; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.UserError; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.monitor.jvm.JvmInfo; - -import java.util.HashMap; -import java.util.Iterator; -import java.util.Locale; -import java.util.Map; -import java.util.Properties; -import java.util.Set; - -import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd; -import static org.elasticsearch.common.cli.CliToolConfig.Builder.optionBuilder; - -final class BootstrapCLIParser extends CliTool { - - private static final CliToolConfig CONFIG = CliToolConfig.config("elasticsearch", BootstrapCLIParser.class) - .cmds(Start.CMD, Version.CMD) - .build(); - - public BootstrapCLIParser() { - super(CONFIG); - } - - public BootstrapCLIParser(Terminal terminal) { - super(CONFIG, terminal); - } - - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - switch (cmdName.toLowerCase(Locale.ROOT)) { - case Start.NAME: - return Start.parse(terminal, cli); - case Version.NAME: - return Version.parse(terminal, cli); - default: - assert false : "should never get here, if the user enters an unknown command, an error message should be shown before parse is called"; - return null; - } - } - - static class Version extends CliTool.Command { - - private static final String NAME = "version"; - - private static final CliToolConfig.Cmd CMD = cmd(NAME, Version.class).build(); - - public static Command parse(Terminal terminal, CommandLine cli) { - return new Version(terminal); - } - - public Version(Terminal terminal) { - super(terminal); - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - terminal.println("Version: " + org.elasticsearch.Version.CURRENT - + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() - + ", JVM: " + JvmInfo.jvmInfo().version()); - return ExitStatus.OK_AND_EXIT; - } - } - - static class Start extends CliTool.Command { - - private static final String NAME = "start"; - - private static final CliToolConfig.Cmd CMD = cmd(NAME, Start.class) - .options( - optionBuilder("d", "daemonize").hasArg(false).required(false), - optionBuilder("p", "pidfile").hasArg(true).required(false), - optionBuilder("V", "version").hasArg(false).required(false), - Option.builder("D").argName("property=value").valueSeparator('=').numberOfArgs(2) - ) - .stopAtNonOption(true) // needed to parse the --foo.bar options, so this parser must be lenient - .build(); - - // TODO: don't use system properties as a way to do this, its horrible... 
- @SuppressForbidden(reason = "Sets system properties passed as CLI parameters") - public static Command parse(Terminal terminal, CommandLine cli) throws UserError { - if (cli.hasOption("V")) { - return Version.parse(terminal, cli); - } - - if (cli.hasOption("d")) { - System.setProperty("es.foreground", "false"); - } - - String pidFile = cli.getOptionValue("pidfile"); - if (!Strings.isNullOrEmpty(pidFile)) { - System.setProperty("es.pidfile", pidFile); - } - - if (cli.hasOption("D")) { - Properties properties = cli.getOptionProperties("D"); - for (Map.Entry entry : properties.entrySet()) { - String key = (String) entry.getKey(); - String propertyName = key.startsWith("es.") ? key : "es." + key; - System.setProperty(propertyName, entry.getValue().toString()); - } - } - - // hacky way to extract all the fancy extra args, there is no CLI tool helper for this - Iterator iterator = cli.getArgList().iterator(); - final Map properties = new HashMap<>(); - while (iterator.hasNext()) { - String arg = iterator.next(); - if (!arg.startsWith("--")) { - if (arg.startsWith("-D") || arg.startsWith("-d") || arg.startsWith("-p")) { - throw new UserError(ExitStatus.USAGE, - "Parameter [" + arg + "] starting with \"-D\", \"-d\" or \"-p\" must be before any parameters starting with --" - ); - } else { - throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "]does not start with --"); - } - } - // if there is no = sign, we have to get the next argu - arg = arg.replace("--", ""); - if (arg.contains("=")) { - String[] splitArg = arg.split("=", 2); - String key = splitArg[0]; - String value = splitArg[1]; - properties.put("es." + key, value); - } else { - if (iterator.hasNext()) { - String value = iterator.next(); - if (value.startsWith("--")) { - throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value"); - } - properties.put("es." 
+ arg, value); - } else { - throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value"); - } - } - } - for (Map.Entry entry : properties.entrySet()) { - System.setProperty(entry.getKey(), entry.getValue()); - } - return new Start(terminal); - } - - public Start(Terminal terminal) { - super(terminal); - - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - return ExitStatus.OK; - } - } - -} diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java index 6ac3c477fd7..433dd4498a4 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java @@ -225,7 +225,7 @@ final class BootstrapCheck { static class MaxNumberOfThreadsCheck implements Check { - private final long maxNumberOfThreadsThreshold = 1 << 15; + private final long maxNumberOfThreadsThreshold = 1 << 11; @Override public boolean check() { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java index a20ff9bb059..4e9dffc995b 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java @@ -20,7 +20,7 @@ package org.elasticsearch.bootstrap; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; public final class BootstrapSettings { @@ -29,10 +29,13 @@ public final class BootstrapSettings { // TODO: remove this hack when insecure defaults are removed from java public static final Setting SECURITY_FILTER_BAD_DEFAULTS_SETTING = - Setting.boolSetting("security.manager.filter_bad_defaults", true, false, Scope.CLUSTER); + Setting.boolSetting("security.manager.filter_bad_defaults", true, Property.NodeScope); - public static final Setting MLOCKALL_SETTING = Setting.boolSetting("bootstrap.mlockall", false, false, Scope.CLUSTER); - public static final Setting SECCOMP_SETTING = Setting.boolSetting("bootstrap.seccomp", true, false, Scope.CLUSTER); - public static final Setting CTRLHANDLER_SETTING = Setting.boolSetting("bootstrap.ctrlhandler", true, false, Scope.CLUSTER); + public static final Setting MLOCKALL_SETTING = + Setting.boolSetting("bootstrap.mlockall", false, Property.NodeScope); + public static final Setting SECCOMP_SETTING = + Setting.boolSetting("bootstrap.seccomp", true, Property.NodeScope); + public static final Setting CTRLHANDLER_SETTING = + Setting.boolSetting("bootstrap.ctrlhandler", true, Property.NodeScope); } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 107a955696c..0cc952907c0 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -19,23 +19,94 @@ package org.elasticsearch.bootstrap; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import joptsimple.util.KeyValuePair; +import org.elasticsearch.Build; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserError; +import org.elasticsearch.monitor.jvm.JvmInfo; + import java.io.IOException; +import java.util.Arrays; 
+import java.util.HashMap; +import java.util.Map; /** * This class starts elasticsearch. */ -public final class Elasticsearch { +class Elasticsearch extends Command { - /** no instantiation */ - private Elasticsearch() {} + private final OptionSpec<Void> versionOption; + private final OptionSpec<Void> daemonizeOption; + private final OptionSpec<String> pidfileOption; + private final OptionSpec<KeyValuePair> propertyOption; + + // visible for testing + Elasticsearch() { + super("starts elasticsearch"); + // TODO: in jopt-simple 5.0, make this mutually exclusive with all other options + versionOption = parser.acceptsAll(Arrays.asList("V", "version"), + "Prints elasticsearch version information and exits"); + daemonizeOption = parser.acceptsAll(Arrays.asList("d", "daemonize"), + "Starts Elasticsearch in the background"); + // TODO: in jopt-simple 5.0 this option type can be a Path + pidfileOption = parser.acceptsAll(Arrays.asList("p", "pidfile"), + "Creates a pid file in the specified path on start") + .withRequiredArg(); + propertyOption = parser.accepts("E", "Configure an Elasticsearch setting").withRequiredArg().ofType(KeyValuePair.class); + } /** * Main entry point for starting elasticsearch */ - public static void main(String[] args) throws StartupError { + public static void main(final String[] args) throws Exception { + final Elasticsearch elasticsearch = new Elasticsearch(); + int status = main(args, elasticsearch, Terminal.DEFAULT); + if (status != ExitCodes.OK) { + exit(status); + } + } + + static int main(final String[] args, final Elasticsearch elasticsearch, final Terminal terminal) throws Exception { + return elasticsearch.main(args, terminal); + } + + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + if (options.has(versionOption)) { + if (options.has(daemonizeOption) || options.has(pidfileOption)) { + throw new UserError(ExitCodes.USAGE, "Elasticsearch version option is mutually exclusive with any other option"); + } + terminal.println("Version: " + org.elasticsearch.Version.CURRENT + + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() + + ", JVM: " + JvmInfo.jvmInfo().version()); + return; + } + + final boolean daemonize = options.has(daemonizeOption); + final String pidFile = pidfileOption.value(options); + + final Map<String, String> esSettings = new HashMap<>(); + for (final KeyValuePair kvp : propertyOption.values(options)) { + if (!kvp.key.startsWith("es.")) { + throw new UserError(ExitCodes.USAGE, "Elasticsearch settings must be prefixed with [es.] but was [" + kvp.key + "]"); + } + if (kvp.value.isEmpty()) { + throw new UserError(ExitCodes.USAGE, "Elasticsearch setting [" + kvp.key + "] must not be empty"); + } + esSettings.put(kvp.key, kvp.value); + } + + init(daemonize, pidFile, esSettings); + } + + void init(final boolean daemonize, final String pidFile, final Map<String, String> esSettings) { try { - Bootstrap.init(args); - } catch (Throwable t) { + Bootstrap.init(!daemonize, pidFile, esSettings); + } catch (final Throwable t) { // format exceptions to the console in a special way // to avoid 2MB stacktraces from guice, etc. 
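For readers unfamiliar with jopt-simple, here is a self-contained sketch of the parsing style the rewritten Elasticsearch command above uses; the option names mirror the ones registered there, the rest is illustrative:

    import java.util.Arrays;

    import joptsimple.OptionParser;
    import joptsimple.OptionSet;
    import joptsimple.OptionSpec;
    import joptsimple.util.KeyValuePair;

    public class ParserSketch {
        public static void main(String[] args) {
            OptionParser parser = new OptionParser();
            OptionSpec<Void> daemonize = parser.acceptsAll(Arrays.asList("d", "daemonize"), "run in the background");
            OptionSpec<KeyValuePair> property = parser.accepts("E", "a setting").withRequiredArg().ofType(KeyValuePair.class);

            // e.g. args = { "-d", "-Ees.path.home=/tmp/es" }
            OptionSet options = parser.parse(args);
            System.out.println("daemonize: " + options.has(daemonize));
            for (KeyValuePair kvp : property.values(options)) {
                System.out.println(kvp.key + " -> " + kvp.value); // es.path.home -> /tmp/es
            }
        }
    }
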
throw new StartupError(t); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java index b9d5ce11dbc..0ea8da6a9be 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java @@ -76,7 +76,7 @@ class JNANatives { softLimit = rlimit.rlim_cur.longValue(); hardLimit = rlimit.rlim_max.longValue(); } else { - logger.warn("Unable to retrieve resource limits: " + JNACLibrary.strerror(Native.getLastError())); + logger.warn("Unable to retrieve resource limits: {}", JNACLibrary.strerror(Native.getLastError())); } } } catch (UnsatisfiedLinkError e) { @@ -85,19 +85,20 @@ class JNANatives { } // mlockall failed for some reason - logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg); + logger.warn("Unable to lock JVM Memory: error={}, reason={}", errno , errMsg); logger.warn("This can result in part of the JVM being swapped out."); if (errno == JNACLibrary.ENOMEM) { if (rlimitSuccess) { - logger.warn("Increase RLIMIT_MEMLOCK, soft limit: " + rlimitToString(softLimit) + ", hard limit: " + rlimitToString(hardLimit)); + logger.warn("Increase RLIMIT_MEMLOCK, soft limit: {}, hard limit: {}", rlimitToString(softLimit), rlimitToString(hardLimit)); if (Constants.LINUX) { // give specific instructions for the linux case to make it easy String user = System.getProperty("user.name"); logger.warn("These can be adjusted by modifying /etc/security/limits.conf, for example: \n" + - "\t# allow user '" + user + "' mlockall\n" + - "\t" + user + " soft memlock unlimited\n" + - "\t" + user + " hard memlock unlimited" - ); + "\t# allow user '{}' mlockall\n" + + "\t{} soft memlock unlimited\n" + + "\t{} hard memlock unlimited", + user, user, user + ); logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect."); } } else { @@ -155,7 +156,7 @@ class JNANatives { // the amount of memory we wish to lock, plus a small overhead (1MB). SizeT size = new SizeT(JvmInfo.jvmInfo().getMem().getHeapInit().getBytes() + (1024 * 1024)); if (!kernel.SetProcessWorkingSetSize(process, size, size)) { - logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code " + Native.getLastError()); + logger.warn("Unable to lock JVM memory. Failed to set working set size. 
Error code {}", Native.getLastError()); } else { JNAKernel32Library.MemoryBasicInformation memInfo = new JNAKernel32Library.MemoryBasicInformation(); long address = 0; @@ -188,7 +189,7 @@ class JNANatives { if (result) { logger.debug("console ctrl handler correctly set"); } else { - logger.warn("unknown error " + Native.getLastError() + " when adding console ctrl handler:"); + logger.warn("unknown error {} when adding console ctrl handler", Native.getLastError()); } } catch (UnsatisfiedLinkError e) { // this will have already been logged by Kernel32Library, no need to repeat it diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java index 3f81cd035bd..86629e4fa36 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java @@ -200,7 +200,7 @@ final class JVMCheck { HotSpotCheck bug = JVM_BROKEN_HOTSPOT_VERSIONS.get(Constants.JVM_VERSION); if (bug != null && bug.check()) { if (bug.getWorkaround().isPresent() && ManagementFactory.getRuntimeMXBean().getInputArguments().contains(bug.getWorkaround().get())) { - Loggers.getLogger(JVMCheck.class).warn(bug.getWarningMessage().get()); + Loggers.getLogger(JVMCheck.class).warn("{}", bug.getWarningMessage().get()); } else { throw new RuntimeException(bug.getErrorMessage()); } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java b/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java index 4325c5b7aef..46908e60642 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java @@ -394,7 +394,7 @@ final class Seccomp { method = 0; int errno1 = Native.getLastError(); if (logger.isDebugEnabled()) { - logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): " + JNACLibrary.strerror(errno1) + ", falling back to prctl(PR_SET_SECCOMP)..."); + logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", JNACLibrary.strerror(errno1)); } if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) { int errno2 = Native.getLastError(); diff --git a/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java b/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java index a293428192b..2fad8678649 100644 --- a/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java +++ b/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java @@ -19,13 +19,13 @@ package org.elasticsearch.cache.recycler; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.recycler.AbstractRecyclerC; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; @@ -43,13 +43,19 @@ import static org.elasticsearch.common.recycler.Recyclers.none; /** A recycler of fixed-size pages. 
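A recurring cleanup in the JNANatives, JVMCheck, and Seccomp hunks above replaces string concatenation in log calls with parameterized {} messages. A minimal sketch of the pattern; the class and message are illustrative:

    import org.elasticsearch.common.logging.ESLogger;
    import org.elasticsearch.common.logging.Loggers;

    class LoggingSketch {
        private static final ESLogger logger = Loggers.getLogger(LoggingSketch.class);

        void onMlockallFailure(int errno, String reason) {
            // the placeholders are substituted by the logging layer, so no message
            // string is built at all when WARN is disabled
            logger.warn("unable to lock memory: error={}, reason={}", errno, reason);
        }
    }
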
*/ public class PageCacheRecycler extends AbstractComponent implements Releasable { - public static final Setting TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, false, Setting.Scope.CLUSTER); - public static final Setting LIMIT_HEAP_SETTING = Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", false, Setting.Scope.CLUSTER); - public static final Setting WEIGHT_BYTES_SETTING = Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, false, Setting.Scope.CLUSTER); - public static final Setting WEIGHT_LONG_SETTING = Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, false, Setting.Scope.CLUSTER); - public static final Setting WEIGHT_INT_SETTING = Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, false, Setting.Scope.CLUSTER); + public static final Setting TYPE_SETTING = + new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, Property.NodeScope); + public static final Setting LIMIT_HEAP_SETTING = + Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", Property.NodeScope); + public static final Setting WEIGHT_BYTES_SETTING = + Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, Property.NodeScope); + public static final Setting WEIGHT_LONG_SETTING = + Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, Property.NodeScope); + public static final Setting WEIGHT_INT_SETTING = + Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, Property.NodeScope); // object pages are less useful to us so we give them a lower weight by default - public static final Setting WEIGHT_OBJECTS_SETTING = Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, false, Setting.Scope.CLUSTER); + public static final Setting WEIGHT_OBJECTS_SETTING = + Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, Property.NodeScope); private final Recycler bytePage; private final Recycler intPage; diff --git a/core/src/main/java/org/elasticsearch/cli/Command.java b/core/src/main/java/org/elasticsearch/cli/Command.java new file mode 100644 index 00000000000..9e6afdd6638 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cli/Command.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +import java.io.IOException; +import java.util.Arrays; + +import joptsimple.OptionException; +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.common.SuppressForbidden; + +/** + * An action to execute within a cli. + */ +public abstract class Command { + + /** A description of the command, used in the help output. 
*/ + protected final String description; + + /** The option parser for this command. */ + protected final OptionParser parser = new OptionParser(); + + private final OptionSpec<Void> helpOption = parser.acceptsAll(Arrays.asList("h", "help"), "show help").forHelp(); + private final OptionSpec<Void> silentOption = parser.acceptsAll(Arrays.asList("s", "silent"), "show minimal output"); + private final OptionSpec<Void> verboseOption = parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output"); + + public Command(String description) { + this.description = description; + } + + /** Parses options for this command from args and executes it. */ + public final int main(String[] args, Terminal terminal) throws Exception { + try { + mainWithoutErrorHandling(args, terminal); + } catch (OptionException e) { + printHelp(terminal); + terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage()); + return ExitCodes.USAGE; + } catch (UserError e) { + terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage()); + return e.exitCode; + } + return ExitCodes.OK; + } + + /** + * Executes the command, but all errors are thrown. + */ + void mainWithoutErrorHandling(String[] args, Terminal terminal) throws Exception { + final OptionSet options = parser.parse(args); + + if (options.has(helpOption)) { + printHelp(terminal); + return; + } + + if (options.has(silentOption)) { + if (options.has(verboseOption)) { + // mutually exclusive, we can remove this with jopt-simple 5.0, which natively supports it + throw new UserError(ExitCodes.USAGE, "Cannot specify -s and -v together"); + } + terminal.setVerbosity(Terminal.Verbosity.SILENT); + } else if (options.has(verboseOption)) { + terminal.setVerbosity(Terminal.Verbosity.VERBOSE); + } else { + terminal.setVerbosity(Terminal.Verbosity.NORMAL); + } + + execute(terminal, options); + } + + /** Prints a help message for the command to the terminal. */ + private void printHelp(Terminal terminal) throws IOException { + terminal.println(description); + terminal.println(""); + printAdditionalHelp(terminal); + parser.printHelpOn(terminal.getWriter()); + } + + /** Prints additional help information, specific to the command */ + protected void printAdditionalHelp(Terminal terminal) {} + + @SuppressForbidden(reason = "Allowed to exit explicitly from #main()") + protected static void exit(int status) { + System.exit(status); + } + + /** + * Executes this command. + * + * Any runtime user errors (like an input file that does not exist), should throw a {@link UserError}. */ + protected abstract void execute(Terminal terminal, OptionSet options) throws Exception; +} diff --git a/core/src/main/java/org/elasticsearch/cli/ExitCodes.java b/core/src/main/java/org/elasticsearch/cli/ExitCodes.java new file mode 100644 index 00000000000..d08deb8b1ad --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cli/ExitCodes.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
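To make the contract of the new Command base class concrete, here is a hypothetical minimal subclass; the tool name and its option are invented for illustration:

    import java.util.Arrays;

    import joptsimple.OptionSet;
    import joptsimple.OptionSpec;
    import org.elasticsearch.cli.Command;
    import org.elasticsearch.cli.Terminal;

    class GreetCommand extends Command {

        private final OptionSpec<String> nameOption;

        GreetCommand() {
            super("prints a greeting");
            nameOption = parser.acceptsAll(Arrays.asList("n", "name"), "name to greet")
                .withRequiredArg().defaultsTo("world");
        }

        @Override
        protected void execute(Terminal terminal, OptionSet options) throws Exception {
            terminal.println("hello " + nameOption.value(options));
        }

        public static void main(String[] args) throws Exception {
            // main(String[], Terminal) returns a POSIX exit code; -h/--help, -s and -v are inherited
            exit(new GreetCommand().main(args, Terminal.DEFAULT));
        }
    }
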
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +/** + * POSIX exit codes. + */ +public class ExitCodes { + public static final int OK = 0; + public static final int USAGE = 64; /* command line usage error */ + public static final int DATA_ERROR = 65; /* data format error */ + public static final int NO_INPUT = 66; /* cannot open input */ + public static final int NO_USER = 67; /* addressee unknown */ + public static final int NO_HOST = 68; /* host name unknown */ + public static final int UNAVAILABLE = 69; /* service unavailable */ + public static final int CODE_ERROR = 70; /* internal software error */ + public static final int CANT_CREATE = 73; /* can't create (user) output file */ + public static final int IO_ERROR = 74; /* input/output error */ + public static final int TEMP_FAILURE = 75; /* temp failure; user is invited to retry */ + public static final int PROTOCOL = 76; /* remote error in protocol */ + public static final int NOPERM = 77; /* permission denied */ + public static final int CONFIG = 78; /* configuration error */ + + private ExitCodes() { /* no instance, just constants */ } +} diff --git a/core/src/main/java/org/elasticsearch/cli/MultiCommand.java b/core/src/main/java/org/elasticsearch/cli/MultiCommand.java new file mode 100644 index 00000000000..a9feee0c9bf --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cli/MultiCommand.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.Map; + +import joptsimple.NonOptionArgumentSpec; +import joptsimple.OptionSet; + +/** + * A cli tool which is made up of multiple subcommands. 
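The ExitCodes constants above pair with UserError (further down in this change): Command.main catches a UserError, prints its message at SILENT verbosity, and returns the associated code. A hypothetical command that always misbehaves would surface to the shell as status 64:

    import joptsimple.OptionSet;
    import org.elasticsearch.cli.Command;
    import org.elasticsearch.cli.ExitCodes;
    import org.elasticsearch.cli.Terminal;
    import org.elasticsearch.cli.UserError;

    class FailingCommand extends Command {

        FailingCommand() {
            super("always fails");
        }

        @Override
        protected void execute(Terminal terminal, OptionSet options) throws Exception {
            // caught by Command.main, printed as "ERROR: missing required input",
            // and converted into exit status 64 (ExitCodes.USAGE)
            throw new UserError(ExitCodes.USAGE, "missing required input");
        }
    }
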
+ */ +public class MultiCommand extends Command { + + protected final Map subcommands = new LinkedHashMap<>(); + + private final NonOptionArgumentSpec arguments = parser.nonOptions("command"); + + public MultiCommand(String description) { + super(description); + parser.posixlyCorrect(true); + } + + @Override + protected void printAdditionalHelp(Terminal terminal) { + if (subcommands.isEmpty()) { + throw new IllegalStateException("No subcommands configured"); + } + terminal.println("Commands"); + terminal.println("--------"); + for (Map.Entry subcommand : subcommands.entrySet()) { + terminal.println(subcommand.getKey() + " - " + subcommand.getValue().description); + } + terminal.println(""); + } + + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + if (subcommands.isEmpty()) { + throw new IllegalStateException("No subcommands configured"); + } + String[] args = arguments.values(options).toArray(new String[0]); + if (args.length == 0) { + throw new UserError(ExitCodes.USAGE, "Missing command"); + } + Command subcommand = subcommands.get(args[0]); + if (subcommand == null) { + throw new UserError(ExitCodes.USAGE, "Unknown command [" + args[0] + "]"); + } + subcommand.mainWithoutErrorHandling(Arrays.copyOfRange(args, 1, args.length), terminal); + } +} diff --git a/core/src/main/java/org/elasticsearch/common/cli/Terminal.java b/core/src/main/java/org/elasticsearch/cli/Terminal.java similarity index 74% rename from core/src/main/java/org/elasticsearch/common/cli/Terminal.java rename to core/src/main/java/org/elasticsearch/cli/Terminal.java index 8d4a8036bdf..d2dc57263dc 100644 --- a/core/src/main/java/org/elasticsearch/common/cli/Terminal.java +++ b/core/src/main/java/org/elasticsearch/cli/Terminal.java @@ -17,18 +17,19 @@ * under the License. */ -package org.elasticsearch.common.cli; +package org.elasticsearch.cli; import java.io.BufferedReader; import java.io.Console; import java.io.IOException; import java.io.InputStreamReader; +import java.io.PrintWriter; import java.nio.charset.Charset; import org.elasticsearch.common.SuppressForbidden; /** - * A Terminal wraps access to reading input and writing output for a {@link CliTool}. + * A Terminal wraps access to reading input and writing output for a cli. * * The available methods are similar to those of {@link Console}, with the ability * to read either normal text or a password, and the ability to print a line @@ -52,8 +53,15 @@ public abstract class Terminal { /** The current verbosity for the terminal, defaulting to {@link Verbosity#NORMAL}. */ private Verbosity verbosity = Verbosity.NORMAL; + /** The newline used when calling println. */ + private final String lineSeparator; + + protected Terminal(String lineSeparator) { + this.lineSeparator = lineSeparator; + } + /** Sets the verbosity of the terminal. */ - void setVerbosity(Verbosity verbosity) { + public void setVerbosity(Verbosity verbosity) { this.verbosity = verbosity; } @@ -63,8 +71,8 @@ public abstract class Terminal { /** Reads password text from the terminal input. See {@link Console#readPassword()}}. */ public abstract char[] readSecret(String prompt); - /** Print a message directly to the terminal. */ - protected abstract void doPrint(String msg); + /** Returns a Writer which can be used to write to the terminal directly. */ + public abstract PrintWriter getWriter(); /** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level. 
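A sketch of how a multi-tool is assembled from the pieces above, reusing the two hypothetical commands from the earlier sketches:

    import org.elasticsearch.cli.MultiCommand;
    import org.elasticsearch.cli.Terminal;

    class ToolsCli extends MultiCommand {

        ToolsCli() {
            super("a collection of example tools");
            subcommands.put("greet", new GreetCommand());
            subcommands.put("fail", new FailingCommand());
        }

        public static void main(String[] args) throws Exception {
            // "greet -n you" dispatches to GreetCommand with the remaining arguments
            exit(new ToolsCli().main(args, Terminal.DEFAULT));
        }
    }
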
*/ public final void println(String msg) { @@ -74,47 +82,60 @@ /** Prints a line to the terminal at {@code verbosity} level. */ public final void println(Verbosity verbosity, String msg) { if (this.verbosity.ordinal() >= verbosity.ordinal()) { - doPrint(msg + System.lineSeparator()); + getWriter().print(msg + lineSeparator); + getWriter().flush(); } } private static class ConsoleTerminal extends Terminal { - private static final Console console = System.console(); + private static final Console CONSOLE = System.console(); + + ConsoleTerminal() { + super(System.lineSeparator()); + } static boolean isSupported() { - return console != null; + return CONSOLE != null; } @Override - public void doPrint(String msg) { - console.printf("%s", msg); - console.flush(); + public PrintWriter getWriter() { + return CONSOLE.writer(); } @Override public String readText(String prompt) { - return console.readLine("%s", prompt); + return CONSOLE.readLine("%s", prompt); } @Override public char[] readSecret(String prompt) { - return console.readPassword("%s", prompt); + return CONSOLE.readPassword("%s", prompt); } } private static class SystemTerminal extends Terminal { + private static final PrintWriter WRITER = newWriter(); + + SystemTerminal() { + super(System.lineSeparator()); + } + + @SuppressForbidden(reason = "Writer for System.out") + private static PrintWriter newWriter() { + return new PrintWriter(System.out); + } + @Override - @SuppressForbidden(reason = "System#out") - public void doPrint(String msg) { - System.out.print(msg); - System.out.flush(); + public PrintWriter getWriter() { + return WRITER; } @Override public String readText(String text) { - doPrint(text); + getWriter().print(text); BufferedReader reader = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset())); try { return reader.readLine(); diff --git a/core/src/main/java/org/elasticsearch/common/cli/UserError.java b/core/src/main/java/org/elasticsearch/cli/UserError.java similarity index 79% rename from core/src/main/java/org/elasticsearch/common/cli/UserError.java rename to core/src/main/java/org/elasticsearch/cli/UserError.java index ad709830885..2a4f2bf1233 100644 --- a/core/src/main/java/org/elasticsearch/common/cli/UserError.java +++ b/core/src/main/java/org/elasticsearch/cli/UserError.java @@ -17,19 +17,19 @@ * under the License. */ -package org.elasticsearch.common.cli; +package org.elasticsearch.cli; /** - * An exception representing a user fixable problem in {@link CliTool} usage. + * An exception representing a user fixable problem in {@link Command} usage. */ public class UserError extends Exception { /** The exit status the cli should use when catching this user error. */ - public final CliTool.ExitStatus exitStatus; + public final int exitCode; /** Constructs a UserError with an exit status and message to show the user. 
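The getWriter() abstraction above also makes terminals easy to fake in tests; a hypothetical capturing implementation:

    import java.io.PrintWriter;
    import java.io.StringWriter;

    import org.elasticsearch.cli.Terminal;

    class CapturingTerminal extends Terminal {

        private final StringWriter buffer = new StringWriter();
        private final PrintWriter writer = new PrintWriter(buffer);

        CapturingTerminal() {
            super("\n"); // a fixed line separator keeps assertions platform independent
        }

        @Override
        public String readText(String prompt) {
            return "";
        }

        @Override
        public char[] readSecret(String prompt) {
            return new char[0];
        }

        @Override
        public PrintWriter getWriter() {
            return writer;
        }

        String getOutput() {
            return buffer.toString();
        }
    }
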
*/ - public UserError(CliTool.ExitStatus exitStatus, String msg) { + public UserError(int exitCode, String msg) { super(msg); - this.exitStatus = exitStatus; + this.exitCode = exitCode; } } diff --git a/core/src/main/java/org/elasticsearch/client/Client.java b/core/src/main/java/org/elasticsearch/client/Client.java index f81ba9eb1b1..e5d8d4f55b7 100644 --- a/core/src/main/java/org/elasticsearch/client/Client.java +++ b/core/src/main/java/org/elasticsearch/client/Client.java @@ -19,12 +19,8 @@ package org.elasticsearch.client; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -87,6 +83,7 @@ import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.Map; @@ -114,7 +111,7 @@ public interface Client extends ElasticsearchClient, Releasable { default: throw new IllegalArgumentException("Can't parse [client.type] must be one of [node, transport]"); } - }, false, Setting.Scope.CLUSTER); + }, Property.NodeScope); /** * The admin client that can be used to perform administrative operations. diff --git a/core/src/main/java/org/elasticsearch/client/Requests.java b/core/src/main/java/org/elasticsearch/client/Requests.java index 3cf4f3dc6cb..53c6abd971c 100644 --- a/core/src/main/java/org/elasticsearch/client/Requests.java +++ b/core/src/main/java/org/elasticsearch/client/Requests.java @@ -62,6 +62,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.suggest.SuggestRequest; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.suggest.SuggestBuilder; /** * A handy one stop shop for creating requests (make sure to import static this class). @@ -127,7 +128,7 @@ public class Requests { /** * Creates a suggest request for getting suggestions from provided indices. - * The suggest query has to be set using the JSON source using {@link org.elasticsearch.action.suggest.SuggestRequest#suggest(org.elasticsearch.common.bytes.BytesReference)}. + * The suggest query has to be set using {@link org.elasticsearch.action.suggest.SuggestRequest#suggest(SuggestBuilder)}. * @param indices The indices to suggest from. Use null or _all to execute against all indices * @see org.elasticsearch.client.Client#suggest(org.elasticsearch.action.suggest.SuggestRequest) */ @@ -342,7 +343,8 @@ public class Requests { /** * Creates a cluster health request. * - * @param indices The indices to provide additional cluster health information for. Use null or _all to execute against all indices + * @param indices The indices to provide additional cluster health information for. 
+ * Use null or _all to execute against all indices * @return The cluster health request * @see org.elasticsearch.client.ClusterAdminClient#health(org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest) */ diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index 1e605b9de06..abbc5823b2a 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -34,6 +34,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -100,10 +101,14 @@ public class TransportClientNodesService extends AbstractComponent { private volatile boolean closed; - public static final Setting CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), false, Setting.Scope.CLUSTER); - public static final Setting CLIENT_TRANSPORT_PING_TIMEOUT = Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), false, Setting.Scope.CLUSTER); - public static final Setting CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = Setting.boolSetting("client.transport.ignore_cluster_name", false, false, Setting.Scope.CLUSTER); - public static final Setting CLIENT_TRANSPORT_SNIFF = Setting.boolSetting("client.transport.sniff", false, false, Setting.Scope.CLUSTER); + public static final Setting CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = + Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), Property.NodeScope); + public static final Setting CLIENT_TRANSPORT_PING_TIMEOUT = + Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), Property.NodeScope); + public static final Setting CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = + Setting.boolSetting("client.transport.ignore_cluster_name", false, Property.NodeScope); + public static final Setting CLIENT_TRANSPORT_SNIFF = + Setting.boolSetting("client.transport.sniff", false, Property.NodeScope); @Inject public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService, @@ -119,7 +124,7 @@ public class TransportClientNodesService extends AbstractComponent { this.ignoreClusterName = CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings); if (logger.isDebugEnabled()) { - logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]"); + logger.debug("node_sampler_interval[{}]", nodesSamplerInterval); } if (CLIENT_TRANSPORT_SNIFF.get(this.settings)) { @@ -318,7 +323,7 @@ public class TransportClientNodesService extends AbstractComponent { transportService.connectToNode(node); } catch (Throwable e) { it.remove(); - logger.debug("failed to connect to discovered node [" + node + "]", e); + logger.debug("failed to connect to discovered node [{}]", e, node); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index e851b7814da..98853e8447f 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.index.Index; import java.util.ArrayList; import java.util.Collections; @@ -120,7 +121,7 @@ public class ClusterChangedEvent { /** * Returns the indices deleted in this event */ - public List<String> indicesDeleted() { + public List<Index> indicesDeleted() { // If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected // master that has had its data directory wiped out, in which case we don't want to delete the indices and lose data; // rather we want to import them as dangling indices instead. So we check here if the cluster UUID differs from the previous @@ -131,17 +132,18 @@ public class ClusterChangedEvent { if (metaDataChanged() == false || isNewCluster()) { return Collections.emptyList(); } - List<String> deleted = null; - for (ObjectCursor<String> cursor : previousState.metaData().indices().keys()) { - String index = cursor.value; - if (!state.metaData().hasIndex(index)) { + List<Index> deleted = null; + for (ObjectCursor<IndexMetaData> cursor : previousState.metaData().indices().values()) { + IndexMetaData index = cursor.value; + IndexMetaData current = state.metaData().index(index.getIndex()); + if (current == null) { if (deleted == null) { deleted = new ArrayList<>(); } - deleted.add(index); + deleted.add(index.getIndex()); } } - return deleted == null ? Collections.<String>emptyList() : deleted; + return deleted == null ? Collections.<Index>emptyList() : deleted; } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 3e668191ff3..47dd2ce9ae6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -53,11 +53,12 @@ import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationD import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.gateway.GatewayAllocator; @@ -74,7 +75,8 @@ public class ClusterModule extends AbstractModule { public static final String EVEN_SHARD_COUNT_ALLOCATOR = "even_shard"; public static final String BALANCED_ALLOCATOR = "balanced"; // default - public static final Setting<String> SHARDS_ALLOCATOR_TYPE_SETTING = new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting<String> SHARDS_ALLOCATOR_TYPE_SETTING = + new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, 
Function.identity(), Property.NodeScope); public static final List> DEFAULT_ALLOCATION_DECIDERS = Collections.unmodifiableList(Arrays.asList( SameShardAllocationDecider.class, @@ -135,7 +137,8 @@ public class ClusterModule extends AbstractModule { bind(GatewayAllocator.class).asEagerSingleton(); bind(AllocationService.class).asEagerSingleton(); bind(DiscoveryNodeService.class).asEagerSingleton(); - bind(ClusterService.class).to(InternalClusterService.class).asEagerSingleton(); + bind(ClusterService.class).asEagerSingleton(); + bind(NodeConnectionsService.class).asEagerSingleton(); bind(OperationRouting.class).asEagerSingleton(); bind(MetaDataCreateIndexService.class).asEagerSingleton(); bind(MetaDataDeleteIndexService.class).asEagerSingleton(); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterName.java b/core/src/main/java/org/elasticsearch/cluster/ClusterName.java index daf3000d710..09c64065dbd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterName.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterName.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.io.IOException; @@ -37,7 +38,7 @@ public class ClusterName implements Streamable { throw new IllegalArgumentException("[cluster.name] must not be empty"); } return s; - }, false, Setting.Scope.CLUSTER); + }, Property.NodeScope); public static final ClusterName DEFAULT = new ClusterName(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY).intern()); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/ClusterService.java deleted file mode 100644 index 27df4b9e96f..00000000000 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterService.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster; - -import org.elasticsearch.cluster.block.ClusterBlock; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.OperationRouting; -import org.elasticsearch.cluster.service.PendingClusterTask; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.LifecycleComponent; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.tasks.TaskManager; - -import java.util.List; - -/** - * The cluster service allowing to both register for cluster state events ({@link ClusterStateListener}) - * and submit state update tasks ({@link ClusterStateUpdateTask}. 
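The same Setting migration repeats throughout this change: the old trailing `false, Setting.Scope.CLUSTER` arguments become `Setting.Property` varargs, with Property.Dynamic added for settings that may be updated at runtime. A sketch of both shapes, with hypothetical setting keys:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;
    import org.elasticsearch.common.unit.TimeValue;

    public class ExampleSettings {

        // static, node-scoped (old style: boolSetting("example.feature.enabled", false, false, Scope.CLUSTER))
        public static final Setting<Boolean> FEATURE_ENABLED =
            Setting.boolSetting("example.feature.enabled", false, Property.NodeScope);

        // node-scoped and dynamically updatable
        public static final Setting<TimeValue> POLL_INTERVAL =
            Setting.positiveTimeSetting("example.poll_interval", TimeValue.timeValueSeconds(30),
                Property.Dynamic, Property.NodeScope);
    }
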
- */ -public interface ClusterService extends LifecycleComponent { - - /** - * The local node. - */ - DiscoveryNode localNode(); - - /** - * The current state. - */ - ClusterState state(); - - /** - * Adds an initial block to be set on the first cluster state created. - */ - void addInitialStateBlock(ClusterBlock block) throws IllegalStateException; - - /** - * Remove an initial block to be set on the first cluster state created. - */ - void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException; - - /** - * Remove an initial block to be set on the first cluster state created. - */ - void removeInitialStateBlock(int blockId) throws IllegalStateException; - - /** - * The operation routing. - */ - OperationRouting operationRouting(); - - /** - * Adds a priority listener for updated cluster states. - */ - void addFirst(ClusterStateListener listener); - - /** - * Adds last listener. - */ - void addLast(ClusterStateListener listener); - - /** - * Adds a listener for updated cluster states. - */ - void add(ClusterStateListener listener); - - /** - * Removes a listener for updated cluster states. - */ - void remove(ClusterStateListener listener); - - /** - * Add a listener for on/off local node master events - */ - void add(LocalNodeMasterListener listener); - - /** - * Remove the given listener for on/off local master events - */ - void remove(LocalNodeMasterListener listener); - - /** - * Adds a cluster state listener that will timeout after the provided timeout, - * and is executed after the clusterstate has been successfully applied ie. is - * in state {@link org.elasticsearch.cluster.ClusterState.ClusterStateStatus#APPLIED} - * NOTE: a {@code null} timeout means that the listener will never be removed - * automatically - */ - void add(@Nullable TimeValue timeout, TimeoutClusterStateListener listener); - - /** - * Submits a cluster state update task; submitted updates will be - * batched across the same instance of executor. The exact batching - * semantics depend on the underlying implementation but a rough - * guideline is that if the update task is submitted while there - * are pending update tasks for the same executor, these update - * tasks will all be executed on the executor in a single batch - * - * @param source the source of the cluster state update task - * @param task the state needed for the cluster state update task - * @param config the cluster state update task configuration - * @param executor the cluster state update task executor; tasks - * that share the same executor will be executed - * batches on this executor - * @param listener callback after the cluster state update task - * completes - * @param the type of the cluster state update task state - */ - void submitStateUpdateTask(final String source, final T task, - final ClusterStateTaskConfig config, - final ClusterStateTaskExecutor executor, - final ClusterStateTaskListener listener); - - /** - * Submits a cluster state update task; unlike {@link #submitStateUpdateTask(String, Object, ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener)}, - * submitted updates will not be batched. - * - * @param source the source of the cluster state update task - * @param updateTask the full context for the cluster state update - * task - */ - void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask); - - /** - * Returns the tasks that are pending. - */ - List pendingTasks(); - - /** - * Returns the number of currently pending tasks. 
- */ - int numberOfPendingTasks(); - - /** - * Returns the maximum wait time for tasks in the queue - * - * @return A zero time value if the queue is empty, otherwise the time value oldest task waiting in the queue - */ - TimeValue getMaxTaskWaitTime(); - - /** - * Returns task manager created in the cluster service - */ - TaskManager getTaskManager(); -} diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index 1b3ddcfebf9..e6cc335a478 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -37,7 +37,7 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -68,7 +68,7 @@ import java.util.Set; * exception of the {@link RoutingNodes} structure, which is built on demand from the {@link RoutingTable}, * and cluster state {@link #status}, which is updated during cluster state publishing and applying * processing. The cluster state can be updated only on the master node. All updates are performed by on a - * single thread and controlled by the {@link InternalClusterService}. After every update the + * single thread and controlled by the {@link ClusterService}. After every update the * {@link Discovery#publish} method publishes new version of the cluster state to all other nodes in the * cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on * the type of discovery. 
For example, for local discovery it is implemented by the {@link LocalDiscovery#publish} diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java index dd30a711688..d79a00dc3fe 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.unit.TimeValue; @@ -44,7 +45,7 @@ public class ClusterStateObserver { } }; - private final ClusterService clusterService; + private final ClusterService clusterService; private final ThreadContext contextHolder; volatile TimeValue timeOutValue; diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 5107b4495ab..9a9ee06ce19 100644 --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -33,12 +33,14 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; @@ -64,8 +66,12 @@ import java.util.concurrent.TimeUnit; */ public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener { - public static final Setting INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.CLUSTER); - public static final Setting INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.CLUSTER); + public static final Setting INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = + Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), + Property.Dynamic, Property.NodeScope); + public static final Setting INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = + Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), + Property.Dynamic, Property.NodeScope); private volatile TimeValue updateFrequency; diff --git a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java new file mode 100644 index 00000000000..698f9d1090c --- 
/dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -0,0 +1,160 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.common.util.concurrent.KeyedLock; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ScheduledFuture; + +import static org.elasticsearch.common.settings.Setting.Property; +import static org.elasticsearch.common.settings.Setting.positiveTimeSetting; + + +/** + * This component is responsible for connecting to nodes once they are added to the cluster state, and disconnecting from them + * when they are removed. It also periodically checks that all connections are still open and restores them if needed. + * Note that this component is *not* responsible for removing nodes from the cluster if they disconnect / do not respond + * to pings. This is done by {@link org.elasticsearch.discovery.zen.fd.NodesFaultDetection}. Master fault detection + * is done by {@link org.elasticsearch.discovery.zen.fd.MasterFaultDetection}. + */ +public class NodeConnectionsService extends AbstractLifecycleComponent { + + public static final Setting<TimeValue> CLUSTER_NODE_RECONNECT_INTERVAL_SETTING = + positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), Property.NodeScope); + private final ThreadPool threadPool; + private final TransportService transportService; + + // map between current node and the number of failed connection attempts. 0 means successfully connected.
+ // if a node doesn't appear in this list it shouldn't be monitored + private ConcurrentMap<DiscoveryNode, Integer> nodes = ConcurrentCollections.newConcurrentMap(); + + private final KeyedLock<DiscoveryNode> nodeLocks = new KeyedLock<>(); + + private final TimeValue reconnectInterval; + + private volatile ScheduledFuture<?> backgroundFuture = null; + + @Inject + public NodeConnectionsService(Settings settings, ThreadPool threadPool, TransportService transportService) { + super(settings); + this.threadPool = threadPool; + this.transportService = transportService; + this.reconnectInterval = NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(settings); + } + + public void connectToAddedNodes(ClusterChangedEvent event) { + + // TODO: do this in parallel (and wait) + for (final DiscoveryNode node : event.nodesDelta().addedNodes()) { + try (Releasable ignored = nodeLocks.acquire(node)) { + Integer current = nodes.put(node, 0); + assert current == null : "node " + node + " was added in event but already in internal nodes"; + validateNodeConnected(node); + } + } + } + + public void disconnectFromRemovedNodes(ClusterChangedEvent event) { + for (final DiscoveryNode node : event.nodesDelta().removedNodes()) { + try (Releasable ignored = nodeLocks.acquire(node)) { + Integer current = nodes.remove(node); + assert current != null : "node " + node + " was removed in event but not in internal nodes"; + try { + transportService.disconnectFromNode(node); + } catch (Throwable e) { + logger.warn("failed to disconnect from node [" + node + "]", e); + } + } + } + } + + void validateNodeConnected(DiscoveryNode node) { + assert nodeLocks.isHeldByCurrentThread(node) : "validateNodeConnected must be called under lock"; + if (lifecycle.stoppedOrClosed() || + nodes.containsKey(node) == false) { // we double check existence of node since connectToNode might take time...
+ // nothing to do + } else { + try { + // connecting to an already connected node is a noop + transportService.connectToNode(node); + nodes.put(node, 0); + } catch (Exception e) { + Integer nodeFailureCount = nodes.get(node); + assert nodeFailureCount != null : node + " didn't have a counter in nodes map"; + nodeFailureCount = nodeFailureCount + 1; + // log every 6th failure + if ((nodeFailureCount % 6) == 1) { + logger.warn("failed to connect to node {} (tried [{}] times)", e, node, nodeFailureCount); + } + nodes.put(node, nodeFailureCount); + } + } + } + + class ConnectionChecker extends AbstractRunnable { + + @Override + public void onFailure(Throwable t) { + logger.warn("unexpected error while checking for node reconnects", t); + } + + protected void doRun() { + for (DiscoveryNode node : nodes.keySet()) { + try (Releasable ignored = nodeLocks.acquire(node)) { + validateNodeConnected(node); + } + } + } + + @Override + public void onAfter() { + if (lifecycle.started()) { + backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this); + } + } + } + + @Override + protected void doStart() { + backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ConnectionChecker()); + } + + @Override + protected void doStop() { + FutureUtils.cancel(backgroundFuture); + } + + @Override + protected void doClose() { + + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 78eef316332..c90edee0d50 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -69,15 +69,17 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus private final State state; private final SnapshotId snapshotId; private final boolean includeGlobalState; + private final boolean partial; private final ImmutableOpenMap shards; private final List indices; private final ImmutableOpenMap> waitingIndices; private final long startTime; - public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, List indices, long startTime, ImmutableOpenMap shards) { + public Entry(SnapshotId snapshotId, boolean includeGlobalState, boolean partial, State state, List indices, long startTime, ImmutableOpenMap shards) { this.state = state; this.snapshotId = snapshotId; this.includeGlobalState = includeGlobalState; + this.partial = partial; this.indices = indices; this.startTime = startTime; if (shards == null) { @@ -90,7 +92,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus } public Entry(Entry entry, State state, ImmutableOpenMap shards) { - this(entry.snapshotId, entry.includeGlobalState, state, entry.indices, entry.startTime, shards); + this(entry.snapshotId, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, shards); } public Entry(Entry entry, ImmutableOpenMap shards) { @@ -121,6 +123,10 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus return includeGlobalState; } + public boolean partial() { + return partial; + } + public long startTime() { return startTime; } @@ -133,6 +139,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus Entry entry = (Entry) o; if (includeGlobalState != entry.includeGlobalState) return false; + if (partial != entry.partial) return false; if (startTime != entry.startTime) return false; if 
(!indices.equals(entry.indices)) return false; if (!shards.equals(entry.shards)) return false; @@ -148,6 +155,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus int result = state.hashCode(); result = 31 * result + snapshotId.hashCode(); result = 31 * result + (includeGlobalState ? 1 : 0); + result = 31 * result + (partial ? 1 : 0); result = 31 * result + shards.hashCode(); result = 31 * result + indices.hashCode(); result = 31 * result + waitingIndices.hashCode(); @@ -360,6 +368,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus for (int i = 0; i < entries.length; i++) { SnapshotId snapshotId = SnapshotId.readSnapshotId(in); boolean includeGlobalState = in.readBoolean(); + boolean partial = in.readBoolean(); State state = State.fromValue(in.readByte()); int indices = in.readVInt(); List indexBuilder = new ArrayList<>(); @@ -375,7 +384,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus State shardState = State.fromValue(in.readByte()); builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); } - entries[i] = new Entry(snapshotId, includeGlobalState, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build()); + entries[i] = new Entry(snapshotId, includeGlobalState, partial, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build()); } return new SnapshotsInProgress(entries); } @@ -386,6 +395,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus for (Entry entry : entries) { entry.snapshotId().writeTo(out); out.writeBoolean(entry.includeGlobalState()); + out.writeBoolean(entry.partial()); out.writeByte(entry.state().value()); out.writeVInt(entry.indices().size()); for (String index : entry.indices()) { @@ -406,6 +416,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots"); static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot"); static final XContentBuilderString INCLUDE_GLOBAL_STATE = new XContentBuilderString("include_global_state"); + static final XContentBuilderString PARTIAL = new XContentBuilderString("partial"); static final XContentBuilderString STATE = new XContentBuilderString("state"); static final XContentBuilderString INDICES = new XContentBuilderString("indices"); static final XContentBuilderString START_TIME_MILLIS = new XContentBuilderString("start_time_millis"); @@ -431,6 +442,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository()); builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot()); builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState()); + builder.field(Fields.PARTIAL, entry.partial()); builder.field(Fields.STATE, entry.state()); builder.startArray(Fields.INDICES); { diff --git a/core/src/main/java/org/elasticsearch/cluster/ack/IndicesClusterStateUpdateRequest.java b/core/src/main/java/org/elasticsearch/cluster/ack/IndicesClusterStateUpdateRequest.java index c691abe5906..33d716cb965 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ack/IndicesClusterStateUpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/cluster/ack/IndicesClusterStateUpdateRequest.java @@ -18,17 +18,19 @@ */ package org.elasticsearch.cluster.ack; +import org.elasticsearch.index.Index; + /** * Base cluster state update request that allows to execute update against multiple indices */ 
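The hunk below shows the substitution this patch repeats across the codebase: raw index-name strings become Index instances, which pair the name with the index UUID. A hypothetical illustration of why that matters follows; both UUID strings are made up for the example.

```java
import org.elasticsearch.index.Index;

class IndexIdentitySketch {
    static void demo() {
        // An index deleted and re-created under the same name gets a fresh
        // UUID, so a stale reference can no longer match the new incarnation.
        Index before = new Index("logs", "Jqp8aF3SQbiqy1RdVjbCow");
        Index after = new Index("logs", "zWK9LcRnSEWwF5Yyz0TqBg");

        assert before.getName().equals(after.getName());
        assert before.equals(after) == false; // equality includes the UUID
    }
}
```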
public abstract class IndicesClusterStateUpdateRequest> extends ClusterStateUpdateRequest { - private String[] indices; + private Index[] indices; /** * Returns the indices the operation needs to be executed on */ - public String[] indices() { + public Index[] indices() { return indices; } @@ -36,7 +38,7 @@ public abstract class IndicesClusterStateUpdateRequest INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = + Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), + Property.Dynamic, Property.NodeScope); private IndicesAdminClient client; private volatile TimeValue dynamicMappingUpdateTimeout; @@ -62,48 +64,20 @@ public class MappingUpdatedAction extends AbstractComponent { this.client = client.admin().indices(); } - private PutMappingRequestBuilder updateMappingRequest(String index, String type, Mapping mappingUpdate, final TimeValue timeout) { + private PutMappingRequestBuilder updateMappingRequest(Index index, String type, Mapping mappingUpdate, final TimeValue timeout) { if (type.equals(MapperService.DEFAULT_MAPPING)) { throw new IllegalArgumentException("_default_ mapping should not be updated"); } - return client.preparePutMapping(index).setType(type).setSource(mappingUpdate.toString()) + return client.preparePutMapping().setConcreteIndex(index).setType(type).setSource(mappingUpdate.toString()) .setMasterNodeTimeout(timeout).setTimeout(timeout); } - public void updateMappingOnMaster(String index, String type, Mapping mappingUpdate, final TimeValue timeout, final MappingUpdateListener listener) { - final PutMappingRequestBuilder request = updateMappingRequest(index, type, mappingUpdate, timeout); - if (listener == null) { - request.execute(); - } else { - final ActionListener actionListener = new ActionListener() { - @Override - public void onResponse(PutMappingResponse response) { - if (response.isAcknowledged()) { - listener.onMappingUpdate(); - } else { - listener.onFailure(new TimeoutException("Failed to acknowledge the mapping response within [" + timeout + "]")); - } - } - - @Override - public void onFailure(Throwable e) { - listener.onFailure(e); - } - }; - request.execute(actionListener); - } - } - - public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Exception { - updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout, null); - } - /** - * Same as {@link #updateMappingOnMasterSynchronously(String, String, Mapping, TimeValue)} + * Same as {@link #updateMappingOnMaster(Index, String, Mapping, TimeValue)} * using the default timeout. */ - public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Exception { - updateMappingOnMasterSynchronously(index, type, mappingUpdate, dynamicMappingUpdateTimeout); + public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate) throws Exception { + updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout); } /** @@ -111,19 +85,9 @@ public class MappingUpdatedAction extends AbstractComponent { * {@code timeout}. When this method returns successfully mappings have * been applied to the master node and propagated to data nodes. 
*/ - public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception { + public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception { if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) { throw new TimeoutException("Failed to acknowledge mapping update within [" + timeout + "]"); } } - - /** - * A listener to be notified when the mappings were updated - */ - public static interface MappingUpdateListener { - - void onMappingUpdate(); - - void onFailure(Throwable t); - } } diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java index 012cc66e110..93fce95fc23 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java @@ -76,18 +76,18 @@ public class NodeIndexDeletedAction extends AbstractComponent { listeners.remove(listener); } - public void nodeIndexDeleted(final ClusterState clusterState, final String index, final IndexSettings indexSettings, final String nodeId) { + public void nodeIndexDeleted(final ClusterState clusterState, final Index index, final IndexSettings indexSettings, final String nodeId) { final DiscoveryNodes nodes = clusterState.nodes(); transportService.sendRequest(clusterState.nodes().masterNode(), INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME); if (nodes.localNode().isDataNode() == false) { - logger.trace("[{}] not acking store deletion (not a data node)"); + logger.trace("[{}] not acking store deletion (not a data node)", index); return; } threadPool.generic().execute(new AbstractRunnable() { @Override public void onFailure(Throwable t) { - logger.warn("[{}]failed to ack index store deleted for index", t, index); + logger.warn("[{}] failed to ack index store deleted for index", t, index); } @Override @@ -97,7 +97,7 @@ public class NodeIndexDeletedAction extends AbstractComponent { }); } - private void lockIndexAndAck(String index, DiscoveryNodes nodes, String nodeId, ClusterState clusterState, IndexSettings indexSettings) throws IOException { + private void lockIndexAndAck(Index index, DiscoveryNodes nodes, String nodeId, ClusterState clusterState, IndexSettings indexSettings) throws IOException { try { // we are waiting until we can lock the index / all shards on the node and then we ack the delete of the store to the // master. 
If we can't acquire the locks here immediately there might be a shard of this index still holding on to the lock @@ -114,9 +114,9 @@ public class NodeIndexDeletedAction extends AbstractComponent { } public interface Listener { - void onNodeIndexDeleted(String index, String nodeId); + void onNodeIndexDeleted(Index index, String nodeId); - void onNodeIndexStoreDeleted(String index, String nodeId); + void onNodeIndexStoreDeleted(Index index, String nodeId); } private class NodeIndexDeletedTransportHandler implements TransportRequestHandler { @@ -143,13 +143,13 @@ public class NodeIndexDeletedAction extends AbstractComponent { public static class NodeIndexDeletedMessage extends TransportRequest { - String index; + Index index; String nodeId; public NodeIndexDeletedMessage() { } - NodeIndexDeletedMessage(String index, String nodeId) { + NodeIndexDeletedMessage(Index index, String nodeId) { this.index = index; this.nodeId = nodeId; } @@ -157,27 +157,27 @@ public class NodeIndexDeletedAction extends AbstractComponent { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(index); + index.writeTo(out); out.writeString(nodeId); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - index = in.readString(); + index = new Index(in); nodeId = in.readString(); } } public static class NodeIndexStoreDeletedMessage extends TransportRequest { - String index; + Index index; String nodeId; public NodeIndexStoreDeletedMessage() { } - NodeIndexStoreDeletedMessage(String index, String nodeId) { + NodeIndexStoreDeletedMessage(Index index, String nodeId) { this.index = index; this.nodeId = nodeId; } @@ -185,14 +185,14 @@ public class NodeIndexDeletedAction extends AbstractComponent { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(index); + index.writeTo(out); out.writeString(nodeId); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - index = in.readString(); + index = new Index(in); nodeId = in.readString(); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index c7f39015c18..68926368ddb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.action.shard; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateTaskConfig; @@ -30,7 +29,6 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.MasterNodeChangePredicate; import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingService; @@ -38,6 +36,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import 
org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; @@ -151,7 +150,7 @@ public class ShardStateAction extends AbstractComponent { @Override public void onNewClusterState(ClusterState state) { if (logger.isTraceEnabled()) { - logger.trace("new cluster state [{}] after waiting for master election to fail shard [{}]", shardRoutingEntry.getShardRouting().shardId(), state.prettyPrint(), shardRoutingEntry); + logger.trace("new cluster state [{}] after waiting for master election to fail shard [{}]", state.prettyPrint(), shardRoutingEntry); } sendShardAction(actionName, observer, shardRoutingEntry, listener); } @@ -321,7 +320,7 @@ public class ShardStateAction extends AbstractComponent { if (numberOfUnassignedShards > 0) { String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards); if (logger.isTraceEnabled()) { - logger.trace(reason + ", scheduling a reroute"); + logger.trace("{}, scheduling a reroute", reason); } routingService.reroute(reason); } diff --git a/core/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java b/core/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java index d66a2437ef2..42ab496fe33 100644 --- a/core/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java +++ b/core/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java @@ -79,7 +79,7 @@ public final class ClusterStateHealth implements Iterable, S * @param clusterState The current cluster state. Must not be null. */ public ClusterStateHealth(ClusterState clusterState) { - this(clusterState, clusterState.metaData().concreteAllIndices()); + this(clusterState, clusterState.metaData().getConcreteAllIndices()); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java index d9b288bb897..4b4a8e54d7c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; /** * This class acts as a functional wrapper around the index.auto_expand_replicas setting. 
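Starting with the next hunk, every setting declaration in the patch gets the same mechanical rewrite: the trailing boolean `dynamic` flag plus `Setting.Scope` enum is replaced by `Setting.Property` varargs. A before-and-after sketch, using a setting name that appears in a later hunk; treat it as illustrative rather than a verbatim excerpt.

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

class SettingStyleSketch {
    // Old style: the bare `true` only means "dynamic" if you recall the parameter order.
    //   Setting.boolSetting("index.blocks.read_only", false, true, Setting.Scope.INDEX);

    // New style: each behavior is a named property, so the declaration reads on its own.
    static final Setting<Boolean> INDEX_READ_ONLY =
        Setting.boolSetting("index.blocks.read_only", false, Property.Dynamic, Property.IndexScope);
}
```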
@@ -56,7 +57,7 @@ final class AutoExpandReplicas { } } return new AutoExpandReplicas(min, max, true); - }, true, Setting.Scope.INDEX); + }, Property.Dynamic, Property.IndexScope); private final int minReplicas; private final int maxReplicas; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 7b4d5a68cec..20ba36dd910 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.xcontent.FromXContentBuilder; @@ -45,6 +46,8 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.RestStatus; @@ -152,28 +155,36 @@ public class IndexMetaData implements Diffable, FromXContentBuild public static final String INDEX_SETTING_PREFIX = "index."; public static final String SETTING_NUMBER_OF_SHARDS = "index.number_of_shards"; - public static final Setting INDEX_NUMBER_OF_SHARDS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, false, Setting.Scope.INDEX); + public static final Setting INDEX_NUMBER_OF_SHARDS_SETTING = + Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, Property.IndexScope); public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas"; - public static final Setting INDEX_NUMBER_OF_REPLICAS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, true, Setting.Scope.INDEX); + public static final Setting INDEX_NUMBER_OF_REPLICAS_SETTING = + Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, Property.Dynamic, Property.IndexScope); public static final String SETTING_SHADOW_REPLICAS = "index.shadow_replicas"; - public static final Setting INDEX_SHADOW_REPLICAS_SETTING = Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, false, Setting.Scope.INDEX); + public static final Setting INDEX_SHADOW_REPLICAS_SETTING = + Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, Property.IndexScope); public static final String SETTING_SHARED_FILESYSTEM = "index.shared_filesystem"; - public static final Setting INDEX_SHARED_FILESYSTEM_SETTING = Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, false, Setting.Scope.INDEX); + public static final Setting INDEX_SHARED_FILESYSTEM_SETTING = + Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, Property.IndexScope); public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; public static final Setting INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING; public static final String SETTING_READ_ONLY = "index.blocks.read_only"; - public static final Setting INDEX_READ_ONLY_SETTING = Setting.boolSetting(SETTING_READ_ONLY, false, true, 
Setting.Scope.INDEX); + public static final Setting INDEX_READ_ONLY_SETTING = + Setting.boolSetting(SETTING_READ_ONLY, false, Property.Dynamic, Property.IndexScope); public static final String SETTING_BLOCKS_READ = "index.blocks.read"; - public static final Setting INDEX_BLOCKS_READ_SETTING = Setting.boolSetting(SETTING_BLOCKS_READ, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_BLOCKS_READ_SETTING = + Setting.boolSetting(SETTING_BLOCKS_READ, false, Property.Dynamic, Property.IndexScope); public static final String SETTING_BLOCKS_WRITE = "index.blocks.write"; - public static final Setting INDEX_BLOCKS_WRITE_SETTING = Setting.boolSetting(SETTING_BLOCKS_WRITE, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_BLOCKS_WRITE_SETTING = + Setting.boolSetting(SETTING_BLOCKS_WRITE, false, Property.Dynamic, Property.IndexScope); public static final String SETTING_BLOCKS_METADATA = "index.blocks.metadata"; - public static final Setting INDEX_BLOCKS_METADATA_SETTING = Setting.boolSetting(SETTING_BLOCKS_METADATA, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_BLOCKS_METADATA_SETTING = + Setting.boolSetting(SETTING_BLOCKS_METADATA, false, Property.Dynamic, Property.IndexScope); public static final String SETTING_VERSION_CREATED = "index.version.created"; public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string"; @@ -182,24 +193,31 @@ public class IndexMetaData implements Diffable, FromXContentBuild public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible"; public static final String SETTING_CREATION_DATE = "index.creation_date"; public static final String SETTING_PRIORITY = "index.priority"; - public static final Setting INDEX_PRIORITY_SETTING = Setting.intSetting("index.priority", 1, 0, true, Setting.Scope.INDEX); + public static final Setting INDEX_PRIORITY_SETTING = + Setting.intSetting("index.priority", 1, 0, Property.Dynamic, Property.IndexScope); public static final String SETTING_CREATION_DATE_STRING = "index.creation_date_string"; public static final String SETTING_INDEX_UUID = "index.uuid"; public static final String SETTING_DATA_PATH = "index.data_path"; - public static final Setting INDEX_DATA_PATH_SETTING = new Setting<>(SETTING_DATA_PATH, "", Function.identity(), false, Setting.Scope.INDEX); + public static final Setting INDEX_DATA_PATH_SETTING = + new Setting<>(SETTING_DATA_PATH, "", Function.identity(), Property.IndexScope); public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node"; - public static final Setting INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING = Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING = + Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, Property.Dynamic, Property.IndexScope); public static final String INDEX_UUID_NA_VALUE = "_na_"; - public static final Setting INDEX_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.require.", true, Setting.Scope.INDEX); - public static final Setting INDEX_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.include.", true, Setting.Scope.INDEX); - public static final Setting INDEX_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.exclude.", true, Setting.Scope.INDEX); + public static final 
Setting INDEX_ROUTING_REQUIRE_GROUP_SETTING = + Setting.groupSetting("index.routing.allocation.require.", Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_ROUTING_INCLUDE_GROUP_SETTING = + Setting.groupSetting("index.routing.allocation.include.", Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_ROUTING_EXCLUDE_GROUP_SETTING = + Setting.groupSetting("index.routing.allocation.exclude.", Property.Dynamic, Property.IndexScope); public static final IndexMetaData PROTO = IndexMetaData.builder("") .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) .numberOfShards(1).numberOfReplicas(0).build(); public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations"; + public static final String INDEX_STATE_FILE_PREFIX = "state-"; private final int numberOfShards; private final int numberOfReplicas; @@ -1008,4 +1026,21 @@ public class IndexMetaData implements Diffable, FromXContentBuild return builder.build(); } + private static final ToXContent.Params FORMAT_PARAMS = new MapParams(Collections.singletonMap("binary", "true")); + + /** + * State format for {@link IndexMetaData} to write to and load from disk + */ + public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat(XContentType.SMILE, INDEX_STATE_FILE_PREFIX) { + + @Override + public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException { + Builder.toXContent(state, builder, FORMAT_PARAMS); + } + + @Override + public IndexMetaData fromXContent(XContentParser parser) throws IOException { + return Builder.fromXContent(parser); + } + }; } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index cca633a7651..2abbea04d51 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndexClosedException; import org.joda.time.DateTimeZone; @@ -65,11 +66,20 @@ public class IndexNameExpressionResolver extends AbstractComponent { ); } + /** + * Same as {@link #concreteIndexNames(ClusterState, IndicesOptions, String...)}, but the index expressions and options + * are encapsulated in the specified request. + */ + public String[] concreteIndexNames(ClusterState state, IndicesRequest request) { + Context context = new Context(state, request.indicesOptions()); + return concreteIndexNames(context, request.indices()); + } + /** * Same as {@link #concreteIndices(ClusterState, IndicesOptions, String...)}, but the index expressions and options * are encapsulated in the specified request. 
*/ - public String[] concreteIndices(ClusterState state, IndicesRequest request) { + public Index[] concreteIndices(ClusterState state, IndicesRequest request) { Context context = new Context(state, request.indicesOptions()); return concreteIndices(context, request.indices()); } @@ -87,7 +97,25 @@ public class IndexNameExpressionResolver extends AbstractComponent { * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided * indices options in the context don't allow such a case. */ - public String[] concreteIndices(ClusterState state, IndicesOptions options, String... indexExpressions) { + public String[] concreteIndexNames(ClusterState state, IndicesOptions options, String... indexExpressions) { + Context context = new Context(state, options); + return concreteIndexNames(context, indexExpressions); + } + + /** + * Translates the provided index expression into actual concrete indices, properly deduplicated. + * + * @param state the cluster state containing all the data to resolve to expressions to concrete indices + * @param options defines how the aliases or indices need to be resolved to concrete indices + * @param indexExpressions expressions that can be resolved to alias or index names. + * @return the resolved concrete indices based on the cluster state, indices options and index expressions + * @throws IndexNotFoundException if one of the index expressions is pointing to a missing index or alias and the + * provided indices options in the context don't allow such a case, or if the final result of the indices resolution + * contains no indices and the indices options in the context don't allow such a case. + * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided + * indices options in the context don't allow such a case. + */ + public Index[] concreteIndices(ClusterState state, IndicesOptions options, String... indexExpressions) { Context context = new Context(state, options); return concreteIndices(context, indexExpressions); } @@ -105,12 +133,21 @@ public class IndexNameExpressionResolver extends AbstractComponent { * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided * indices options in the context don't allow such a case. */ - public String[] concreteIndices(ClusterState state, IndicesOptions options, long startTime, String... indexExpressions) { + public String[] concreteIndexNames(ClusterState state, IndicesOptions options, long startTime, String... indexExpressions) { Context context = new Context(state, options, startTime); - return concreteIndices(context, indexExpressions); + return concreteIndexNames(context, indexExpressions); } - String[] concreteIndices(Context context, String... indexExpressions) { + String[] concreteIndexNames(Context context, String... indexExpressions) { + Index[] indexes = concreteIndices(context, indexExpressions); + String[] names = new String[indexes.length]; + for (int i = 0; i < indexes.length; i++) { + names[i] = indexes[i].getName(); + } + return names; + } + + Index[] concreteIndices(Context context, String... 
indexExpressions) { if (indexExpressions == null || indexExpressions.length == 0) { indexExpressions = new String[]{MetaData.ALL}; } @@ -136,11 +173,11 @@ public class IndexNameExpressionResolver extends AbstractComponent { infe.setResources("index_expression", indexExpressions); throw infe; } else { - return Strings.EMPTY_ARRAY; + return Index.EMPTY_ARRAY; } } - final Set concreteIndices = new HashSet<>(expressions.size()); + final Set concreteIndices = new HashSet<>(expressions.size()); for (String expression : expressions) { AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(expression); if (aliasOrIndex == null) { @@ -169,11 +206,11 @@ public class IndexNameExpressionResolver extends AbstractComponent { throw new IndexClosedException(index.getIndex()); } else { if (options.forbidClosedIndices() == false) { - concreteIndices.add(index.getIndex().getName()); + concreteIndices.add(index.getIndex()); } } } else if (index.getState() == IndexMetaData.State.OPEN) { - concreteIndices.add(index.getIndex().getName()); + concreteIndices.add(index.getIndex()); } else { throw new IllegalStateException("index state [" + index.getState() + "] not supported"); } @@ -185,7 +222,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { infe.setResources("index_expression", indexExpressions); throw infe; } - return concreteIndices.toArray(new String[concreteIndices.size()]); + return concreteIndices.toArray(new Index[concreteIndices.size()]); } /** @@ -200,9 +237,9 @@ public class IndexNameExpressionResolver extends AbstractComponent { * @throws IllegalArgumentException if the index resolution lead to more than one index * @return the concrete index obtained as a result of the index resolution */ - public String concreteSingleIndex(ClusterState state, IndicesRequest request) { + public Index concreteSingleIndex(ClusterState state, IndicesRequest request) { String indexExpression = request.indices() != null && request.indices().length > 0 ? 
request.indices()[0] : null; - String[] indices = concreteIndices(state, request.indicesOptions(), indexExpression); + Index[] indices = concreteIndices(state, request.indicesOptions(), indexExpression); if (indices.length != 1) { throw new IllegalArgumentException("unable to return a single index as the index and options provided got resolved to multiple indices"); } @@ -395,7 +432,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { if (routing != null) { Set r = Strings.splitStringByCommaToSet(routing); Map> routings = new HashMap<>(); - String[] concreteIndices = metaData.concreteAllIndices(); + String[] concreteIndices = metaData.getConcreteAllIndices(); for (String index : concreteIndices) { routings.put(index, r); } @@ -435,7 +472,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { */ boolean isPatternMatchingAllIndices(MetaData metaData, String[] indicesOrAliases, String[] concreteIndices) { // if we end up matching on all indices, check, if its a wildcard parameter, or a "-something" structure - if (concreteIndices.length == metaData.concreteAllIndices().length && indicesOrAliases.length > 0) { + if (concreteIndices.length == metaData.getConcreteAllIndices().length && indicesOrAliases.length > 0) { //we might have something like /-test1,+test1 that would identify all indices //or something like /-test1 with test1 index missing and IndicesOptions.lenient() @@ -686,16 +723,16 @@ public class IndexNameExpressionResolver extends AbstractComponent { } private boolean isEmptyOrTrivialWildcard(List expressions) { - return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0))) || Regex.isMatchAllPattern(expressions.get(0))); + return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0)) || Regex.isMatchAllPattern(expressions.get(0)))); } private List resolveEmptyOrTrivialWildcard(IndicesOptions options, MetaData metaData, boolean assertEmpty) { if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) { - return Arrays.asList(metaData.concreteAllIndices()); + return Arrays.asList(metaData.getConcreteAllIndices()); } else if (options.expandWildcardsOpen()) { - return Arrays.asList(metaData.concreteAllOpenIndices()); + return Arrays.asList(metaData.getConcreteAllOpenIndices()); } else if (options.expandWildcardsClosed()) { - return Arrays.asList(metaData.concreteAllClosedIndices()); + return Arrays.asList(metaData.getConcreteAllClosedIndices()); } else { assert assertEmpty : "Shouldn't end up here"; return Collections.emptyList(); @@ -867,7 +904,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { * Returns true iff the given expression resolves to the given index name otherwise false */ public final boolean matchesIndex(String indexName, String expression, ClusterState state) { - final String[] concreteIndices = concreteIndices(state, IndicesOptions.lenientExpandOpen(), expression); + final String[] concreteIndices = concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), expression); for (String index : concreteIndices) { if (Regex.simpleMatch(index, indexName)) { return true; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java index a88f1609b9e..10b05c46657 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java +++ 
b/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java @@ -84,20 +84,10 @@ public class MappingMetaData extends AbstractDiffable { private static final FormatDateTimeFormatter EPOCH_MILLIS_PARSER = Joda.forPattern("epoch_millis"); - public static String parseStringTimestamp(String timestampAsString, FormatDateTimeFormatter dateTimeFormatter, - Version version) throws TimestampParsingException { + public static String parseStringTimestamp(String timestampAsString, FormatDateTimeFormatter dateTimeFormatter) throws TimestampParsingException { try { - // no need for unix timestamp parsing in 2.x - FormatDateTimeFormatter formatter = version.onOrAfter(Version.V_2_0_0_beta1) ? dateTimeFormatter : EPOCH_MILLIS_PARSER; - return Long.toString(formatter.parser().parseMillis(timestampAsString)); + return Long.toString(dateTimeFormatter.parser().parseMillis(timestampAsString)); } catch (RuntimeException e) { - if (version.before(Version.V_2_0_0_beta1)) { - try { - return Long.toString(dateTimeFormatter.parser().parseMillis(timestampAsString)); - } catch (RuntimeException e1) { - throw new TimestampParsingException(timestampAsString, e1); - } - } throw new TimestampParsingException(timestampAsString, e); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index d7dddb15984..db6871b0645 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -30,7 +30,7 @@ import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; @@ -41,6 +41,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.xcontent.FromXContentBuilder; @@ -50,6 +51,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.store.IndexStoreConfig; @@ -139,7 +141,8 @@ public class MetaData implements Iterable, Diffable, Fr } - public static final Setting SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, true, Setting.Scope.CLUSTER); + public static final Setting SETTING_READ_ONLY_SETTING = + Setting.boolSetting("cluster.blocks.read_only", false, Property.Dynamic, Property.NodeScope); public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, 
EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); @@ -151,6 +154,8 @@ public class MetaData implements Iterable, Diffable, Fr public static final String CONTEXT_MODE_GATEWAY = XContentContext.GATEWAY.toString(); + public static final String GLOBAL_STATE_FILE_PREFIX = "global-"; + private final String clusterUUID; private final long version; @@ -230,7 +235,7 @@ public class MetaData implements Iterable, Diffable, Fr public boolean equalsAliases(MetaData other) { for (ObjectCursor cursor : other.indices().values()) { IndexMetaData otherIndex = cursor.value; - IndexMetaData thisIndex= index(otherIndex.getIndex()); + IndexMetaData thisIndex = index(otherIndex.getIndex()); if (thisIndex == null) { return false; } @@ -368,26 +373,14 @@ public class MetaData implements Iterable, Diffable, Fr /** * Returns all the concrete indices. */ - public String[] concreteAllIndices() { - return allIndices; - } - public String[] getConcreteAllIndices() { - return concreteAllIndices(); - } - - public String[] concreteAllOpenIndices() { - return allOpenIndices; + return allIndices; } public String[] getConcreteAllOpenIndices() { return allOpenIndices; } - public String[] concreteAllClosedIndices() { - return allClosedIndices; - } - public String[] getConcreteAllClosedIndices() { return allClosedIndices; } @@ -455,7 +448,28 @@ public class MetaData implements Iterable, Diffable, Fr } public IndexMetaData index(Index index) { - return index(index.getName()); + IndexMetaData metaData = index(index.getName()); + if (metaData != null && metaData.getIndexUUID().equals(index.getUUID())) { + return metaData; + } + return null; + } + + /** + * Returns the {@link IndexMetaData} for this index. + * @throws IndexNotFoundException if no metadata for this index is found + */ + public IndexMetaData getIndexSafe(Index index) { + IndexMetaData metaData = index(index.getName()); + if (metaData != null) { + if (metaData.getIndexUUID().equals(index.getUUID())) { + return metaData; + } + throw new IndexNotFoundException(index, + new IllegalStateException("index uuid doesn't match expected: [" + index.getUUID() + + "] but got: [" + metaData.getIndexUUID() + "]")); + } + throw new IndexNotFoundException(index); } public ImmutableOpenMap indices() { @@ -486,20 +500,13 @@ public class MetaData implements Iterable, Diffable, Fr return (T) customs.get(type); } - public int totalNumberOfShards() { + + public int getTotalNumberOfShards() { return this.totalNumberOfShards; } - public int getTotalNumberOfShards() { - return totalNumberOfShards(); - } - - public int numberOfShards() { - return this.numberOfShards; - } - public int getNumberOfShards() { - return numberOfShards(); + return this.numberOfShards; } /** @@ -731,7 +738,7 @@ public class MetaData implements Iterable, Diffable, Fr InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), - InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey())); + ClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey())); /** As of 2.0 we require units for time and byte-sized settings. This method adds default units to any cluster settings that don't * specify a unit.
*/ @@ -779,9 +786,9 @@ public class MetaData implements Iterable, Diffable, Fr metaData.getIndices(), metaData.getTemplates(), metaData.getCustoms(), - metaData.concreteAllIndices(), - metaData.concreteAllOpenIndices(), - metaData.concreteAllClosedIndices(), + metaData.getConcreteAllIndices(), + metaData.getConcreteAllOpenIndices(), + metaData.getConcreteAllClosedIndices(), metaData.getAliasAndIndexLookup()); } else { // No changes: @@ -842,6 +849,19 @@ public class MetaData implements Iterable, Diffable, Fr return indices.get(index); } + public IndexMetaData getSafe(Index index) { + IndexMetaData indexMetaData = get(index.getName()); + if (indexMetaData != null) { + if(indexMetaData.getIndexUUID().equals(index.getUUID())) { + return indexMetaData; + } + throw new IndexNotFoundException(index, + new IllegalStateException("index uuid doesn't match expected: [" + index.getUUID() + + "] but got: [" + indexMetaData.getIndexUUID() +"]")); + } + throw new IndexNotFoundException(index); + } + public Builder remove(String index) { indices.remove(index); return this; @@ -1143,4 +1163,28 @@ public class MetaData implements Iterable, Diffable, Fr return PROTO.readFrom(in); } } + + private final static ToXContent.Params FORMAT_PARAMS; + static { + Map params = new HashMap<>(2); + params.put("binary", "true"); + params.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY); + FORMAT_PARAMS = new MapParams(params); + } + + /** + * State format for {@link MetaData} to write to and load from disk + */ + public final static MetaDataStateFormat FORMAT = new MetaDataStateFormat(XContentType.SMILE, GLOBAL_STATE_FILE_PREFIX) { + + @Override + public void toXContent(XContentBuilder builder, MetaData state) throws IOException { + Builder.toXContent(state, builder, FORMAT_PARAMS); + } + + @Override + public MetaData fromXContent(XContentParser parser) throws IOException { + return Builder.fromXContent(parser); + } + }; } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 62f3ad802a0..e0db19cb516 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlock; @@ -39,6 +38,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; @@ -53,6 +53,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.env.Environment; +import 
org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.DocumentMapper; @@ -188,7 +189,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { @Override public ClusterState execute(ClusterState currentState) throws Exception { - boolean indexCreated = false; + Index createdIndex = null; String removalReason = null; try { validate(request, currentState); @@ -308,10 +309,9 @@ public class MetaDataCreateIndexService extends AbstractComponent { // Set up everything, now locally create the index to see that things are ok, and apply final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build(); // create the index here (on the master) to validate it can be created, as well as adding the mapping - indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList()); - indexCreated = true; + final IndexService indexService = indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList()); + createdIndex = indexService.index(); // now add the mappings - IndexService indexService = indicesService.indexServiceSafe(request.index()); MapperService mapperService = indexService.mapperService(); // first, add the default mapping if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) { @@ -415,9 +415,9 @@ public class MetaDataCreateIndexService extends AbstractComponent { removalReason = "cleaning up after validating index on master"; return updatedState; } finally { - if (indexCreated) { + if (createdIndex != null) { // Index was already partially created - need to clean up - indicesService.removeIndex(request.index(), removalReason != null ? removalReason : "failed to create index"); + indicesService.removeIndex(createdIndex, removalReason != null ? 
removalReason : "failed to create index"); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java index 54c014fb4ed..5e6d35aacfe 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction; @@ -28,20 +27,22 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.Index; +import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; -import java.util.Arrays; -import java.util.Collection; +import java.util.Set; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; /** * @@ -67,10 +68,9 @@ public class MetaDataDeleteIndexService extends AbstractComponent { } public void deleteIndices(final Request request, final Listener userListener) { - Collection indices = Arrays.asList(request.indices); final DeleteIndexListener listener = new DeleteIndexListener(userListener); - clusterService.submitStateUpdateTask("delete-index " + indices, new ClusterStateUpdateTask(Priority.URGENT) { + clusterService.submitStateUpdateTask("delete-index " + request.indices, new ClusterStateUpdateTask(Priority.URGENT) { @Override public TimeValue timeout() { @@ -84,20 +84,21 @@ public class MetaDataDeleteIndexService extends AbstractComponent { @Override public ClusterState execute(final ClusterState currentState) { + final MetaData meta = currentState.metaData(); + final Set metaDatas = request.indices.stream().map(i -> meta.getIndexSafe(i)).collect(Collectors.toSet()); + // Check if index deletion conflicts with any running snapshots + SnapshotsService.checkIndexDeletion(currentState, metaDatas); + final Set indices = request.indices; RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable()); - MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData()); + MetaData.Builder metaDataBuilder = MetaData.builder(meta); ClusterBlocks.Builder clusterBlocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks()); - for (final String index: indices) { - if (!currentState.metaData().hasConcreteIndex(index)) { - throw new IndexNotFoundException(index); - } - + for (final Index index : indices) { + String indexName = index.getName(); logger.debug("[{}] 
deleting index", index); - - routingTableBuilder.remove(index); - clusterBlocksBuilder.removeIndexBlocks(index); - metaDataBuilder.remove(index); + routingTableBuilder.remove(indexName); + clusterBlocksBuilder.removeIndexBlocks(indexName); + metaDataBuilder.remove(indexName); } // wait for events from all nodes that it has been removed from their respective metadata... int count = currentState.nodes().size(); @@ -108,7 +109,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent { // this listener will be notified once we get back a notification based on the cluster state change below. final NodeIndexDeletedAction.Listener nodeIndexDeleteListener = new NodeIndexDeletedAction.Listener() { @Override - public void onNodeIndexDeleted(String deleted, String nodeId) { + public void onNodeIndexDeleted(Index deleted, String nodeId) { if (indices.contains(deleted)) { if (counter.decrementAndGet() == 0) { listener.onResponse(new Response(true)); @@ -118,7 +119,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent { } @Override - public void onNodeIndexStoreDeleted(String deleted, String nodeId) { + public void onNodeIndexStoreDeleted(Index deleted, String nodeId) { if (indices.contains(deleted)) { if (counter.decrementAndGet() == 0) { listener.onResponse(new Response(true)); @@ -183,12 +184,12 @@ public class MetaDataDeleteIndexService extends AbstractComponent { public static class Request { - final String[] indices; + final Set indices; TimeValue timeout = TimeValue.timeValueSeconds(10); TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; - public Request(String[] indices) { + public Request(Set indices) { this.indices = indices; } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 52154bd2c04..e39b86a1611 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -23,14 +23,15 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; @@ -74,7 +75,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { @Override public ClusterState execute(final ClusterState currentState) { - List indicesToClose = new ArrayList<>(); + List indicesToClose = new ArrayList<>(); Map indices = new HashMap<>(); try { for (AliasAction aliasAction : request.actions()) { @@ -112,7 +113,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { logger.warn("[{}] failed to temporary create in order to apply alias action", e, 
indexMetaData.getIndex()); continue; } - indicesToClose.add(indexMetaData.getIndex().getName()); + indicesToClose.add(indexMetaData.getIndex()); } indices.put(indexMetaData.getIndex().getName(), indexService); } @@ -153,7 +154,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { } return currentState; } finally { - for (String index : indicesToClose) { + for (Index index : indicesToClose) { indicesService.removeIndex(index, "created for alias processing"); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index df26df29800..71962d6356a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlock; @@ -32,16 +31,21 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.Index; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.snapshots.RestoreService; +import org.elasticsearch.snapshots.SnapshotsService; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashSet; import java.util.List; +import java.util.Set; /** * Service responsible for submitting open/close index requests @@ -78,15 +82,11 @@ public class MetaDataIndexStateService extends AbstractComponent { @Override public ClusterState execute(ClusterState currentState) { - List indicesToClose = new ArrayList<>(); - for (String index : request.indices()) { - IndexMetaData indexMetaData = currentState.metaData().index(index); - if (indexMetaData == null) { - throw new IndexNotFoundException(index); - } - + Set indicesToClose = new HashSet<>(); + for (Index index : request.indices()) { + final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index); if (indexMetaData.getState() != IndexMetaData.State.CLOSE) { - indicesToClose.add(index); + indicesToClose.add(indexMetaData); } } @@ -94,21 +94,26 @@ public class MetaDataIndexStateService extends AbstractComponent { return currentState; } + // Check if index closing conflicts with any running restores + RestoreService.checkIndexClosing(currentState, indicesToClose); + // Check if index closing conflicts with any running snapshots + SnapshotsService.checkIndexClosing(currentState, indicesToClose); logger.info("closing indices [{}]", indicesAsString); MetaData.Builder mdBuilder = 
MetaData.builder(currentState.metaData()); ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder() .blocks(currentState.blocks()); - for (String index : indicesToClose) { - mdBuilder.put(IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.CLOSE)); - blocksBuilder.addIndexBlock(index, INDEX_CLOSED_BLOCK); + for (IndexMetaData openIndexMetadata : indicesToClose) { + final String indexName = openIndexMetadata.getIndex().getName(); + mdBuilder.put(IndexMetaData.builder(openIndexMetadata).state(IndexMetaData.State.CLOSE)); + blocksBuilder.addIndexBlock(indexName, INDEX_CLOSED_BLOCK); } ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build(); RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); - for (String index : indicesToClose) { - rtBuilder.remove(index); + for (IndexMetaData index : indicesToClose) { + rtBuilder.remove(index.getIndex().getName()); } RoutingAllocation.Result routingResult = allocationService.reroute( @@ -134,14 +139,11 @@ public class MetaDataIndexStateService extends AbstractComponent { @Override public ClusterState execute(ClusterState currentState) { - List indicesToOpen = new ArrayList<>(); - for (String index : request.indices()) { - IndexMetaData indexMetaData = currentState.metaData().index(index); - if (indexMetaData == null) { - throw new IndexNotFoundException(index); - } + List indicesToOpen = new ArrayList<>(); + for (Index index : request.indices()) { + final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index); if (indexMetaData.getState() != IndexMetaData.State.OPEN) { - indicesToOpen.add(index); + indicesToOpen.add(indexMetaData); } } @@ -154,20 +156,21 @@ public class MetaDataIndexStateService extends AbstractComponent { MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder() .blocks(currentState.blocks()); - for (String index : indicesToOpen) { - IndexMetaData indexMetaData = IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.OPEN).build(); + for (IndexMetaData closedMetaData : indicesToOpen) { + final String indexName = closedMetaData.getIndex().getName(); + IndexMetaData indexMetaData = IndexMetaData.builder(closedMetaData).state(IndexMetaData.State.OPEN).build(); // The index might be closed because we couldn't import it due to old incompatible version // We need to check that this index can be upgraded to the current version indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData); mdBuilder.put(indexMetaData, true); - blocksBuilder.removeIndexBlock(index, INDEX_CLOSED_BLOCK); + blocksBuilder.removeIndexBlock(indexName, INDEX_CLOSED_BLOCK); } ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build(); RoutingTable.Builder rtBuilder = RoutingTable.builder(updatedState.routingTable()); - for (String index : indicesToOpen) { - rtBuilder.addAsFromCloseToOpen(updatedState.metaData().index(index)); + for (IndexMetaData index : indicesToOpen) { + rtBuilder.addAsFromCloseToOpen(updatedState.metaData().getIndexSafe(index.getIndex())); } RoutingAllocation.Result routingResult = allocationService.reroute( diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index da2fc064dc4..1206185a609 100644 
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -21,9 +21,9 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index c06a5cc7c1c..cafdc4581a1 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -23,27 +23,28 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; import org.elasticsearch.cluster.AckedClusterStateTaskListener; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidTypeNameException; -import org.elasticsearch.percolator.PercolatorService; import java.io.IOException; import java.util.ArrayList; @@ -112,13 +113,13 @@ public class MetaDataMappingService extends AbstractComponent { MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); for (Map.Entry> entry : tasksPerIndex.entrySet()) { - String index = entry.getKey(); - IndexMetaData indexMetaData = mdBuilder.get(index); + IndexMetaData indexMetaData = mdBuilder.get(entry.getKey()); if (indexMetaData == null) { // index got deleted on us, ignore... 
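One theme of this diff, visible in the MetaDataMappingService hunk above, is replacing bare index-name strings with `Index` objects and matching queued tasks against the live metadata by UUID (`isSameUUID`, continued just below): a name alone is ambiguous once an index has been deleted and re-created under the same name. A minimal sketch of that identity check; `IndexId` is a hypothetical stand-in, not the real `org.elasticsearch.index.Index`:

```java
// Sketch only: IndexId is a hypothetical stand-in for org.elasticsearch.index.Index,
// which carries both the index name and the creation-time UUID.
final class IndexId {
    final String name;
    final String uuid;

    IndexId(String name, String uuid) {
        this.name = name;
        this.uuid = uuid;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof IndexId)) return false;
        IndexId other = (IndexId) o;
        // The name alone is ambiguous: "logs" may have been deleted and re-created,
        // so a queued task must also match the UUID of the current incarnation.
        return name.equals(other.name) && uuid.equals(other.uuid);
    }

    @Override
    public int hashCode() {
        return 31 * name.hashCode() + uuid.hashCode();
    }
}
```

Using the (name, uuid) pair as the key is what lets these services drop their per-name existence checks in favor of `getIndexSafe(index)`.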
- logger.debug("[{}] ignoring tasks - index meta data doesn't exist", index); + logger.debug("[{}] ignoring tasks - index meta data doesn't exist", entry.getKey()); continue; } + final Index index = indexMetaData.getIndex(); // the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep // the latest (based on order) update mapping one per node List allIndexTasks = entry.getValue(); @@ -127,7 +128,7 @@ public class MetaDataMappingService extends AbstractComponent { if (indexMetaData.isSameUUID(task.indexUUID)) { hasTaskWithRightUUID = true; } else { - logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task); + logger.debug("{} ignoring task [{}] - index meta data doesn't match task uuid", index, task); } } if (hasTaskWithRightUUID == false) { @@ -136,7 +137,7 @@ public class MetaDataMappingService extends AbstractComponent { // construct the actual index if needed, and make sure the relevant mappings are there boolean removeIndex = false; - IndexService indexService = indicesService.indexService(index); + IndexService indexService = indicesService.indexService(indexMetaData.getIndex()); if (indexService == null) { // we need to create the index here, and add the current mapping to it, so we can merge indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList()); @@ -208,38 +209,38 @@ public class MetaDataMappingService extends AbstractComponent { class PutMappingExecutor implements ClusterStateTaskExecutor { @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - Set indicesToClose = new HashSet<>(); + public BatchResult execute(ClusterState currentState, + List tasks) throws Exception { + Set indicesToClose = new HashSet<>(); BatchResult.Builder builder = BatchResult.builder(); try { // precreate incoming indices; for (PutMappingClusterStateUpdateRequest request : tasks) { - // failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up - for (String index : request.indices()) { - final IndexMetaData indexMetaData = currentState.metaData().index(index); - if (indexMetaData != null && indicesService.hasIndex(index) == false) { - // if we don't have the index, we will throw exceptions later; - indicesToClose.add(index); - IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList()); - // add mappings for all types, we need them for cross-type validation - for (ObjectCursor mapping : indexMetaData.getMappings().values()) { - indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes()); + try { + for (Index index : request.indices()) { + final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index); + if (indicesService.hasIndex(indexMetaData.getIndex()) == false) { + // if the index does not exists we create it once, add all types to the mapper service and + // close it later once we are done with mapping update + indicesToClose.add(indexMetaData.getIndex()); + IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, + Collections.emptyList()); + // add mappings for all types, we need them for cross-type validation + for (ObjectCursor mapping : indexMetaData.getMappings().values()) { + indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), + 
MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes()); + } } } - } - } - for (PutMappingClusterStateUpdateRequest request : tasks) { - try { currentState = applyRequest(currentState, request); builder.success(request); } catch (Throwable t) { builder.failure(request, t); } } - return builder.build(currentState); } finally { - for (String index : indicesToClose) { + for (Index index : indicesToClose) { indicesService.removeIndex(index, "created for mapping processing"); } } @@ -248,8 +249,17 @@ public class MetaDataMappingService extends AbstractComponent { private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException { String mappingType = request.type(); CompressedXContent mappingUpdateSource = new CompressedXContent(request.source()); - for (String index : request.indices()) { + final MetaData metaData = currentState.metaData(); + final List> updateList = new ArrayList<>(); + for (Index index : request.indices()) { IndexService indexService = indicesService.indexServiceSafe(index); + // IMPORTANT: always get the metadata from the state since it gets batched + // and if we pull it from the indexService we might miss an update etc. + final IndexMetaData indexMetaData = currentState.getMetaData().getIndexSafe(index); + + // this is paranoia... just to be sure we use the exact same indexService and metadata tuple on the update that + // we used for the validation, it makes this mechanism a little less scary (a little) + updateList.add(new Tuple<>(indexService, indexMetaData)); // try and parse it (no need to add it here) so we can bail early in case of parsing exception DocumentMapper newMapper; DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type()); @@ -270,7 +280,6 @@ public class MetaDataMappingService extends AbstractComponent { // and a put mapping api call, so we don't which type did exist before. // Also the order of the mappings may be backwards.
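The `applyRequest` hunk above captures a `Tuple<IndexService, IndexMetaData>` per index during validation and reuses exactly that pair when applying the merged mapping. A compact, self-contained sketch of the pattern under assumed stand-in types (`Pair` and `ValidateThenApply` are illustrative, not Elasticsearch API):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Function;

// Hypothetical stand-in; the real code uses org.elasticsearch.common.collect.Tuple.
final class Pair<A, B> {
    final A v1;
    final B v2;
    Pair(A v1, B v2) { this.v1 = v1; this.v2 = v2; }
}

final class ValidateThenApply {
    // Resolve each (service, metadata) pair once while validating, then apply the
    // update with the identical pair, so a batched cluster-state update cannot
    // validate against one metadata snapshot and apply against another.
    static <S, M> void run(List<S> services, Function<S, M> resolveMeta, BiConsumer<S, M> apply) {
        List<Pair<S, M>> updateList = new ArrayList<>();
        for (S service : services) {
            updateList.add(new Pair<>(service, resolveMeta.apply(service))); // validate phase
        }
        for (Pair<S, M> pair : updateList) {
            apply.accept(pair.v1, pair.v2); // apply phase reuses the captured pair
        }
    }
}
```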
if (newMapper.parentFieldMapper().active()) { - IndexMetaData indexMetaData = currentState.metaData().index(index); for (ObjectCursor mapping : indexMetaData.getMappings().values()) { if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) { throw new IllegalArgumentException("can't add a _parent field that points to an already existing type"); @@ -287,17 +296,16 @@ public class MetaDataMappingService extends AbstractComponent { } assert mappingType != null; - if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') { + if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorFieldMapper.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') { throw new InvalidTypeNameException("Document mapping type name can't start with '_'"); } - MetaData.Builder builder = MetaData.builder(currentState.metaData()); - for (String index : request.indices()) { + MetaData.Builder builder = MetaData.builder(metaData); + for (Tuple toUpdate : updateList) { // do the actual merge here on the master, and update the mapping source - IndexService indexService = indicesService.indexService(index); - if (indexService == null) { - continue; - } - + // we use the exact same indexService and metadata we used to validate above here to actually apply the update + final IndexService indexService = toUpdate.v1(); + final IndexMetaData indexMetaData = toUpdate.v2(); + final Index index = indexMetaData.getIndex(); CompressedXContent existingSource = null; DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType); if (existingMapper != null) { @@ -312,24 +320,20 @@ public class MetaDataMappingService extends AbstractComponent { } else { // use the merged mapping source if (logger.isDebugEnabled()) { - logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource); + logger.debug("{} update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource); } else if (logger.isInfoEnabled()) { - logger.info("[{}] update_mapping [{}]", index, mergedMapper.type()); + logger.info("{} update_mapping [{}]", index, mergedMapper.type()); } } } else { if (logger.isDebugEnabled()) { - logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource); + logger.debug("{} create_mapping [{}] with source [{}]", index, mappingType, updatedSource); } else if (logger.isInfoEnabled()) { - logger.info("[{}] create_mapping [{}]", index, mappingType); + logger.info("{} create_mapping [{}]", index, mappingType); } } - IndexMetaData indexMetaData = currentState.metaData().index(index); - if (indexMetaData == null) { - throw new IndexNotFoundException(index); - } IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData); // Mapping updates on a single type may have side-effects on other types so we need to // update mapping metadata on all types diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 2d7ba4c3c05..1c57f446074 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -23,10 +23,8 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest; -import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; @@ -35,6 +33,7 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; @@ -43,7 +42,7 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.Index; import java.util.ArrayList; import java.util.HashMap; @@ -86,7 +85,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements // we will want to know this for translating "all" to a number final int dataNodeCount = event.state().nodes().dataNodes().size(); - Map> nrReplicasChanged = new HashMap<>(); + Map> nrReplicasChanged = new HashMap<>(); // we need to do this each time in case it was changed by update settings for (final IndexMetaData indexMetaData : event.state().metaData()) { AutoExpandReplicas autoExpandReplicas = IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING.get(indexMetaData.getSettings()); @@ -117,7 +116,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements nrReplicasChanged.put(numberOfReplicas, new ArrayList<>()); } - nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex().getName()); + nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex()); } } } @@ -126,25 +125,25 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements // update settings and kick of a reroute (implicit) for them to take effect for (final Integer fNumberOfReplicas : nrReplicasChanged.keySet()) { Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, fNumberOfReplicas).build(); - final List indices = nrReplicasChanged.get(fNumberOfReplicas); + final List indices = nrReplicasChanged.get(fNumberOfReplicas); UpdateSettingsClusterStateUpdateRequest updateRequest = new UpdateSettingsClusterStateUpdateRequest() - .indices(indices.toArray(new String[indices.size()])).settings(settings) + .indices(indices.toArray(new Index[indices.size()])).settings(settings) .ackTimeout(TimeValue.timeValueMillis(0)) //no need to wait for ack here .masterNodeTimeout(TimeValue.timeValueMinutes(10)); updateSettings(updateRequest, new ActionListener() { @Override public void onResponse(ClusterStateUpdateResponse response) { - for (String index : indices) { - logger.info("[{}] auto expanded replicas to [{}]", index, fNumberOfReplicas); + for (Index index : indices) { + logger.info("{} auto expanded replicas to [{}]", index, 
fNumberOfReplicas); } } @Override public void onFailure(Throwable t) { - for (String index : indices) { - logger.warn("[{}] fail to auto expand replicas to [{}]", index, fNumberOfReplicas); + for (Index index : indices) { + logger.warn("{} fail to auto expand replicas to [{}]", index, fNumberOfReplicas); } } }); @@ -188,16 +187,19 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements @Override public ClusterState execute(ClusterState currentState) { - String[] actualIndices = indexNameExpressionResolver.concreteIndices(currentState, IndicesOptions.strictExpand(), request.indices()); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable()); MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData()); // allow to change any settings to a close index, and only allow dynamic settings to be changed // on an open index - Set openIndices = new HashSet<>(); - Set closeIndices = new HashSet<>(); - for (String index : actualIndices) { - if (currentState.metaData().index(index).getState() == IndexMetaData.State.OPEN) { + Set openIndices = new HashSet<>(); + Set closeIndices = new HashSet<>(); + final String[] actualIndices = new String[request.indices().length]; + for (int i = 0; i < request.indices().length; i++) { + Index index = request.indices()[i]; + actualIndices[i] = index.getName(); + final IndexMetaData metaData = currentState.metaData().getIndexSafe(index); + if (metaData.getState() == IndexMetaData.State.OPEN) { openIndices.add(index); } else { closeIndices.add(index); @@ -206,13 +208,13 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements if (closeIndices.size() > 0 && closedSettings.get(IndexMetaData.SETTING_NUMBER_OF_REPLICAS) != null) { throw new IllegalArgumentException(String.format(Locale.ROOT, - "Can't update [%s] on closed indices [%s] - can leave index in an unopenable state", IndexMetaData.SETTING_NUMBER_OF_REPLICAS, + "Can't update [%s] on closed indices %s - can leave index in an unopenable state", IndexMetaData.SETTING_NUMBER_OF_REPLICAS, closeIndices )); } if (!skippedSettigns.getAsMap().isEmpty() && !openIndices.isEmpty()) { throw new IllegalArgumentException(String.format(Locale.ROOT, - "Can't update non dynamic settings[%s] for open indices [%s]", + "Can't update non dynamic settings [%s] for open indices %s", skippedSettigns.getAsMap().keySet(), openIndices )); @@ -232,28 +234,22 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_SETTING, openSettings); if (!openIndices.isEmpty()) { - for (String index : openIndices) { - IndexMetaData indexMetaData = metaDataBuilder.get(index); - if (indexMetaData == null) { - throw new IndexNotFoundException(index); - } + for (Index index : openIndices) { + IndexMetaData indexMetaData = metaDataBuilder.getSafe(index); Settings.Builder updates = Settings.builder(); Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings()); - if (indexScopedSettings.updateDynamicSettings(openSettings, indexSettings, updates, index)) { + if (indexScopedSettings.updateDynamicSettings(openSettings, indexSettings, updates, index.getName())) { metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings)); } } } if (!closeIndices.isEmpty()) { - for (String index : closeIndices) { - IndexMetaData indexMetaData = metaDataBuilder.get(index); - if (indexMetaData == 
null) { - throw new IndexNotFoundException(index); - } + for (Index index : closeIndices) { + IndexMetaData indexMetaData = metaDataBuilder.getSafe(index); Settings.Builder updates = Settings.builder(); Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings()); - if (indexScopedSettings.updateSettings(closedSettings, indexSettings, updates, index)) { + if (indexScopedSettings.updateSettings(closedSettings, indexSettings, updates, index.getName())) { metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings)); } } @@ -265,11 +261,11 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements // now, reroute in case things change that require it (like number of replicas) RoutingAllocation.Result routingResult = allocationService.reroute(updatedState, "settings update"); updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build(); - for (String index : openIndices) { - indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings()); + for (Index index : openIndices) { + indexScopedSettings.dryRun(updatedState.metaData().getIndexSafe(index).getSettings()); } - for (String index : closeIndices) { - indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings()); + for (Index index : closeIndices) { + indexScopedSettings.dryRun(updatedState.metaData().getIndexSafe(index).getSettings()); } return updatedState; } diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java index c53c8def5c6..51cc16a8402 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java @@ -19,25 +19,42 @@ package org.elasticsearch.cluster.node; +import org.elasticsearch.Version; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; +import org.elasticsearch.common.transport.TransportAddress; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.concurrent.CopyOnWriteArrayList; /** */ public class DiscoveryNodeService extends AbstractComponent { + public static final Setting NODE_ID_SEED_SETTING = + // don't use node.id.seed so it won't be seen as an attribute + Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, Property.NodeScope); private final List customAttributesProviders = new CopyOnWriteArrayList<>(); + private final Version version; @Inject - public DiscoveryNodeService(Settings settings) { + public DiscoveryNodeService(Settings settings, Version version) { super(settings); + this.version = version; + } + + public static String generateNodeId(Settings settings) { + Random random = Randomness.get(settings, NODE_ID_SEED_SETTING); + return Strings.randomBase64UUID(random); } public DiscoveryNodeService addCustomAttributeProvider(CustomAttributesProvider customAttributesProvider) { @@ -45,7 +62,7 @@ public class DiscoveryNodeService extends AbstractComponent { return this; } - public Map buildAttributes() { + public DiscoveryNode buildLocalNode(TransportAddress publishAddress) { Map 
attributes = new HashMap<>(Node.NODE_ATTRIBUTES.get(this.settings).getAsMap()); attributes.remove("name"); // name is extracted in other places if (attributes.containsKey("client")) { @@ -74,7 +91,8 @@ public class DiscoveryNodeService extends AbstractComponent { } } - return attributes; + final String nodeId = generateNodeId(settings); + return new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, attributes, version); } public interface CustomAttributesProvider { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 160ccbf06b3..c32d9de363d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -313,7 +313,7 @@ public class IndexRoutingTable extends AbstractDiffable imple @Override public IndexRoutingTable readFrom(StreamInput in) throws IOException { - Index index = Index.readIndex(in); + Index index = new Index(in); Builder builder = new Builder(index); int size = in.readVInt(); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index bda0a24c9a4..e64f8f5d77c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -584,7 +584,7 @@ public class IndexShardRoutingTable implements Iterable { } public static IndexShardRoutingTable readFrom(StreamInput in) throws IOException { - Index index = Index.readIndex(in); + Index index = new Index(in); return readFromThin(in, index); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 4f2f9d06097..a6ef564904c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -597,6 +597,13 @@ public class RoutingNodes implements Iterable { } + /** + * Returns the number of routing nodes + */ + public int size() { + return nodesToShards.size(); + } + public static final class UnassignedShards implements Iterable { private final RoutingNodes nodes; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index c683f0200dc..90565a6569d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -20,12 +20,12 @@ package org.elasticsearch.cluster.routing; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; diff --git 
a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index cfa33e4f225..a5975deb9cf 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -328,7 +328,7 @@ public final class ShardRouting implements Streamable, ToXContent { @Override public void readFrom(StreamInput in) throws IOException { - readFrom(in, Index.readIndex(in), in.readVInt()); + readFrom(in, new Index(in), in.readVInt()); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index 714c1e4913a..be7d90a1fef 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; @@ -44,7 +45,9 @@ public class UnassignedInfo implements ToXContent, Writeable { public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime"); private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMinutes(1); - public static final Setting INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, true, Setting.Scope.INDEX); + public static final Setting INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = + Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, Property.Dynamic, + Property.IndexScope); /** * Reason why the shard is in unassigned state. 
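The `UnassignedInfo` hunk above is one instance of a migration that recurs throughout this diff: positional `Setting` arguments (a trailing `boolean` for dynamic updatability plus a `Scope` enum) are replaced by varargs `Setting.Property` values. A rough sketch of the call-site ergonomics this buys; `SettingSketch` is illustrative only, not the real `org.elasticsearch.common.settings.Setting`:

```java
import java.util.EnumSet;

// Illustrative sketch, not the actual Elasticsearch Setting class: flags modeled
// as an EnumSet built from varargs read more clearly at call sites than positional
// booleans, and introducing a new flag does not break existing signatures.
final class SettingSketch {
    enum Property { Dynamic, NodeScope, IndexScope }

    final String key;
    final EnumSet<Property> properties;

    SettingSketch(String key, Property... properties) {
        this.key = key;
        // EnumSet deduplicates, so passing the first element twice is harmless
        this.properties = properties.length == 0
            ? EnumSet.noneOf(Property.class)
            : EnumSet.of(properties[0], properties);
    }

    boolean isDynamic() {
        return properties.contains(Property.Dynamic);
    }
}
```

With the flags held in an `EnumSet`, adding a property such as `IndexScope` later does not disturb existing constructor calls.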
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index eeeb6e3389c..54f9b6855a6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -36,13 +35,13 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; -import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; -import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.GatewayAllocator; import java.util.ArrayList; import java.util.Collections; @@ -63,14 +62,17 @@ import java.util.stream.Collectors; public class AllocationService extends AbstractComponent { private final AllocationDeciders allocationDeciders; + private final GatewayAllocator gatewayAllocator; + private final ShardsAllocator shardsAllocator; private final ClusterInfoService clusterInfoService; - private final ShardsAllocators shardsAllocators; @Inject - public AllocationService(Settings settings, AllocationDeciders allocationDeciders, ShardsAllocators shardsAllocators, ClusterInfoService clusterInfoService) { + public AllocationService(Settings settings, AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator, + ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) { super(settings); this.allocationDeciders = allocationDeciders; - this.shardsAllocators = shardsAllocators; + this.gatewayAllocator = gatewayAllocator; + this.shardsAllocator = shardsAllocator; this.clusterInfoService = clusterInfoService; } @@ -92,7 +94,7 @@ public class AllocationService extends AbstractComponent { if (!changed) { return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); } - shardsAllocators.applyStartedShards(allocation); + gatewayAllocator.applyStartedShards(allocation); if (withReroute) { reroute(allocation); } @@ -192,7 +194,7 @@ public class AllocationService extends AbstractComponent { if (!changed) { return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); } - shardsAllocators.applyFailedShards(allocation); + gatewayAllocator.applyFailedShards(allocation); reroute(allocation); final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes); String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString()); @@ -306,14 +308,10 @@ public class AllocationService extends 
AbstractComponent { if (allocation.routingNodes().unassigned().size() > 0) { updateLeftDelayOfUnassignedShards(allocation, settings); - changed |= shardsAllocators.allocateUnassigned(allocation); + changed |= gatewayAllocator.allocateUnassigned(allocation); } - // move shards that no longer can be allocated - changed |= shardsAllocators.moveShards(allocation); - - // rebalance - changed |= shardsAllocators.rebalance(allocation); + changed |= shardsAllocator.allocate(allocation); assert RoutingNodes.assertShardStats(allocation.routingNodes()); return changed; } @@ -322,7 +320,7 @@ public class AllocationService extends AbstractComponent { public static void updateLeftDelayOfUnassignedShards(RoutingAllocation allocation, Settings settings) { for (ShardRouting shardRouting : allocation.routingNodes().unassigned()) { final MetaData metaData = allocation.metaData(); - final IndexMetaData indexMetaData = metaData.index(shardRouting.index()); + final IndexMetaData indexMetaData = metaData.getIndexSafe(shardRouting.index()); shardRouting.unassignedInfo().updateDelay(allocation.getCurrentNanoTime(), settings, indexMetaData.getSettings()); } } @@ -342,7 +340,6 @@ public class AllocationService extends AbstractComponent { changed |= failReplicasForUnassignedPrimary(allocation, shardEntry); ShardRouting candidate = allocation.routingNodes().activeReplica(shardEntry); if (candidate != null) { - IndexMetaData index = allocation.metaData().index(candidate.index()); routingNodes.swapPrimaryFlag(shardEntry, candidate); if (candidate.relocatingNodeId() != null) { changed = true; @@ -357,6 +354,7 @@ public class AllocationService extends AbstractComponent { } } } + IndexMetaData index = allocation.metaData().getIndexSafe(candidate.index()); if (IndexMetaData.isIndexUsingShadowReplicas(index.getSettings())) { routingNodes.reinitShadowPrimary(candidate); changed = true; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 0c40b26ca67..8102f206799 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing.allocation.allocator; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IntroSorter; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -28,9 +27,7 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; @@ -39,21 +36,18 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; 
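Worth flagging before the BalancedShardsAllocator hunks: the AllocationService changes above split the old composite `ShardsAllocators` into an explicit `GatewayAllocator` (started/failed-shard bookkeeping plus unassigned-shard allocation) and a single-method `ShardsAllocator`. A sketch of the narrowed contract, with a placeholder for `RoutingAllocation`; the names here are assumptions, not the exact upstream signatures:

```java
// Illustrative only; the real interface lives in
// org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator.
interface ShardsAllocatorSketch {
    /**
     * Single entry point replacing allocateUnassigned()/moveShards()/rebalance():
     * the allocator sequences the phases itself and reports whether the
     * routing table changed.
     */
    boolean allocate(RoutingAllocationStub allocation);
}

final class RoutingAllocationStub {
    // placeholder for org.elasticsearch.cluster.routing.allocation.RoutingAllocation
}
```

The `allocate(...)` implementation in the hunks just below chains the former phases internally: assign unassigned shards, then move shards that can no longer remain, then rebalance by weight.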
+import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.PriorityComparator; -import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.IdentityHashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.Predicate; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; @@ -74,9 +68,13 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; */ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { - public static final Setting INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, true, Setting.Scope.CLUSTER); - public static final Setting SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, true, Setting.Scope.CLUSTER); - public static final Setting THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, true, Setting.Scope.CLUSTER); + public static final Setting INDEX_BALANCE_FACTOR_SETTING = + Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, Property.Dynamic, Property.NodeScope); + public static final Setting SHARD_BALANCE_FACTOR_SETTING = + Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, Property.Dynamic, Property.NodeScope); + public static final Setting THRESHOLD_SETTING = + Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, + Property.Dynamic, Property.NodeScope); private volatile WeightFunction weightFunction; private volatile float threshold; @@ -103,27 +101,16 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } @Override - public void applyStartedShards(StartedRerouteAllocation allocation) { /* ONLY FOR GATEWAYS */ } - - @Override - public void applyFailedShards(FailedRerouteAllocation allocation) { /* ONLY FOR GATEWAYS */ } - - @Override - public boolean allocateUnassigned(RoutingAllocation allocation) { + public boolean allocate(RoutingAllocation allocation) { + if (allocation.routingNodes().size() == 0) { + /* with no nodes this is pointless */ + return false; + } final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold); - return balancer.allocateUnassigned(); - } - - @Override - public boolean rebalance(RoutingAllocation allocation) { - final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold); - return balancer.balance(); - } - - @Override - public boolean moveShards(RoutingAllocation allocation) { - final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold); - return balancer.moveShards(); + boolean changed = balancer.allocateUnassigned(); + changed |= balancer.moveShards(); + changed |= balancer.balance(); + return changed; } /** @@ -203,8 +190,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } private float weight(Balancer balancer, ModelNode node, String index, int numAdditionalShards) { - final float weightShard = (node.numShards() + numAdditionalShards - balancer.avgShardsPerNode()); - final float weightIndex = (node.numShards(index) + numAdditionalShards - balancer.avgShardsPerNode(index)); + final float weightShard = node.numShards() + 
numAdditionalShards - balancer.avgShardsPerNode(); + final float weightIndex = node.numShards(index) + numAdditionalShards - balancer.avgShardsPerNode(index); return theta0 * weightShard + theta1 * weightIndex; } @@ -216,7 +203,6 @@ public static class Balancer { private final ESLogger logger; private final Map nodes = new HashMap<>(); - private final HashSet indices = new HashSet<>(); private final RoutingAllocation allocation; private final RoutingNodes routingNodes; private final WeightFunction weight; @@ -225,19 +211,15 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards private final MetaData metaData; private final float avgShardsPerNode; - private final Predicate assignedFilter = shard -> shard.assignedToNode(); - public Balancer(ESLogger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) { this.logger = logger; this.allocation = allocation; this.weight = weight; this.threshold = threshold; this.routingNodes = allocation.routingNodes(); - for (RoutingNode node : routingNodes) { - nodes.put(node.nodeId(), new ModelNode(node.nodeId())); - } metaData = routingNodes.metaData(); - avgShardsPerNode = ((float) metaData.totalNumberOfShards()) / nodes.size(); + avgShardsPerNode = ((float) metaData.getTotalNumberOfShards()) / routingNodes.size(); + buildModelFromAssigned(); } /** @@ -271,17 +253,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards return new NodeSorter(nodesArray(), weight, this); } - private boolean initialize(RoutingNodes routing, RoutingNodes.UnassignedShards unassigned) { - if (logger.isTraceEnabled()) { - logger.trace("Start distributing Shards"); - } - for (ObjectCursor index : allocation.routingTable().indicesRouting().keys()) { - indices.add(index.value); - } - buildModelFromAssigned(routing.shards(assignedFilter)); - return allocateUnassigned(unassigned); - } - private static float absDelta(float lower, float higher) { assert higher >= lower : higher + " lt " + lower +" but was expected to be gte"; return Math.abs(higher - lower); @@ -295,12 +266,36 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } /** - * Allocates all possible unassigned shards + * Balances the nodes on the cluster model according to the weight function. + * The actual balancing is delegated to {@link #balanceByWeights()} + * * @return true if the current configuration has been * changed, otherwise false */ - final boolean allocateUnassigned() { - return balance(true); + private boolean balance() { + if (logger.isTraceEnabled()) { + logger.trace("Start balancing cluster"); + } + if (allocation.hasPendingAsyncFetch()) { + /* + * see https://github.com/elastic/elasticsearch/issues/14387 + * if we allow rebalance operations while we are still fetching shard store data + * we might end up with unnecessary rebalance operations which can be super confusing/frustrating + * since once the fetches come back we might just move all the shards back again. + * Therefore we only do a rebalance if we have fetched all information.
+ */ + logger.debug("skipping rebalance due to in-flight shard/store fetches"); + return false; + } + if (allocation.deciders().canRebalance(allocation).type() != Type.YES) { + logger.trace("skipping rebalance as it is disabled"); + return false; + } + if (nodes.size() < 2) { /* skip if we only have one node */ + logger.trace("skipping rebalance as single node only"); + return false; + } + return balanceByWeights(); } /** @@ -317,120 +312,100 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * @return true if the current configuration has been * changed, otherwise false */ - public boolean balance() { - return balance(false); - } + private boolean balanceByWeights() { + boolean changed = false; + final NodeSorter sorter = newNodeSorter(); + final AllocationDeciders deciders = allocation.deciders(); + final ModelNode[] modelNodes = sorter.modelNodes; + final float[] weights = sorter.weights; + for (String index : buildWeightOrderedIndices(sorter)) { + IndexMetaData indexMetaData = metaData.index(index); - private boolean balance(boolean onlyAssign) { - if (this.nodes.isEmpty()) { - /* with no nodes this is pointless */ - return false; - } - if (logger.isTraceEnabled()) { - if (onlyAssign) { - logger.trace("Start balancing cluster"); - } else { - logger.trace("Start assigning unassigned shards"); + // find nodes that have a shard of this index or where shards of this index are allowed to stay + // move these nodes to the front of modelNodes so that we can only balance based on these nodes + int relevantNodes = 0; + for (int i = 0; i < modelNodes.length; i++) { + ModelNode modelNode = modelNodes[i]; + if (modelNode.getIndex(index) != null + || deciders.canAllocate(indexMetaData, modelNode.getRoutingNode(), allocation).type() != Type.NO) { + // swap nodes at position i and relevantNodes + modelNodes[i] = modelNodes[relevantNodes]; + modelNodes[relevantNodes] = modelNode; + relevantNodes++; + } } - } - final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned(); - boolean changed = initialize(routingNodes, unassigned); - if (onlyAssign == false && changed == false && allocation.deciders().canRebalance(allocation).type() == Type.YES) { - NodeSorter sorter = newNodeSorter(); - if (nodes.size() > 1) { /* skip if we only have one node */ - AllocationDeciders deciders = allocation.deciders(); - final ModelNode[] modelNodes = sorter.modelNodes; - final float[] weights = sorter.weights; - for (String index : buildWeightOrderedIndices(sorter)) { - IndexMetaData indexMetaData = metaData.index(index); - // find nodes that have a shard of this index or where shards of this index are allowed to stay - // move these nodes to the front of modelNodes so that we can only balance based on these nodes - int relevantNodes = 0; - for (int i = 0; i < modelNodes.length; i++) { - ModelNode modelNode = modelNodes[i]; - if (modelNode.getIndex(index) != null - || deciders.canAllocate(indexMetaData, modelNode.getRoutingNode(routingNodes), allocation).type() != Type.NO) { - // swap nodes at position i and relevantNodes - modelNodes[i] = modelNodes[relevantNodes]; - modelNodes[relevantNodes] = modelNode; - relevantNodes++; + if (relevantNodes < 2) { + continue; + } + + sorter.reset(index, 0, relevantNodes); + int lowIdx = 0; + int highIdx = relevantNodes - 1; + while (true) { + final ModelNode minNode = modelNodes[lowIdx]; + final ModelNode maxNode = modelNodes[highIdx]; + advance_range: + if (maxNode.numShards(index) > 0) { + final float delta = absDelta(weights[lowIdx], 
weights[highIdx]); + if (lessThan(delta, threshold)) { + if (lowIdx > 0 && highIdx-1 > 0 // is there a chance for a higher delta? + && (absDelta(weights[0], weights[highIdx-1]) > threshold) // check if we need to break at all + ) { + /* This is a special case if allocations from the "heaviest" to the "lighter" nodes is not possible + * due to some allocation decider restrictions like zone awareness. if one zone has for instance + * less nodes than another zone. so one zone is horribly overloaded from a balanced perspective but we + * can't move to the "lighter" shards since otherwise the zone would go over capacity. + * + * This break jumps straight to the condition below were we start moving from the high index towards + * the low index to shrink the window we are considering for balance from the other direction. + * (check shrinking the window from MAX to MIN) + * See #3580 + */ + break advance_range; } + if (logger.isTraceEnabled()) { + logger.trace("Stop balancing index [{}] min_node [{}] weight: [{}] max_node [{}] weight: [{}] delta: [{}]", + index, maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta); + } + break; } - - if (relevantNodes < 2) { + if (logger.isTraceEnabled()) { + logger.trace("Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]", + maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta); + } + /* pass the delta to the replication function to prevent relocations that only swap the weights of the two nodes. + * a relocation must bring us closer to the balance if we only achieve the same delta the relocation is useless */ + if (tryRelocateShard(minNode, maxNode, index, delta)) { + /* + * TODO we could be a bit smarter here, we don't need to fully sort necessarily + * we could just find the place to insert linearly but the win might be minor + * compared to the added complexity + */ + weights[lowIdx] = sorter.weight(modelNodes[lowIdx]); + weights[highIdx] = sorter.weight(modelNodes[highIdx]); + sorter.sort(0, relevantNodes); + lowIdx = 0; + highIdx = relevantNodes - 1; + changed = true; continue; } - - sorter.reset(index, 0, relevantNodes); - int lowIdx = 0; - int highIdx = relevantNodes - 1; - while (true) { - final ModelNode minNode = modelNodes[lowIdx]; - final ModelNode maxNode = modelNodes[highIdx]; - advance_range: - if (maxNode.numShards(index) > 0) { - final float delta = absDelta(weights[lowIdx], weights[highIdx]); - if (lessThan(delta, threshold)) { - if (lowIdx > 0 && highIdx-1 > 0 // is there a chance for a higher delta? - && (absDelta(weights[0], weights[highIdx-1]) > threshold) // check if we need to break at all - ) { - /* This is a special case if allocations from the "heaviest" to the "lighter" nodes is not possible - * due to some allocation decider restrictions like zone awareness. if one zone has for instance - * less nodes than another zone. so one zone is horribly overloaded from a balanced perspective but we - * can't move to the "lighter" shards since otherwise the zone would go over capacity. - * - * This break jumps straight to the condition below were we start moving from the high index towards - * the low index to shrink the window we are considering for balance from the other direction. 
- * (check shrinking the window from MAX to MIN) - * See #3580 - */ - break advance_range; - } - if (logger.isTraceEnabled()) { - logger.trace("Stop balancing index [{}] min_node [{}] weight: [{}] max_node [{}] weight: [{}] delta: [{}]", - index, maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta); - } - break; - } - if (logger.isTraceEnabled()) { - logger.trace("Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]", - maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta); - } - /* pass the delta to the replication function to prevent relocations that only swap the weights of the two nodes. - * a relocation must bring us closer to the balance if we only achieve the same delta the relocation is useless */ - if (tryRelocateShard(minNode, maxNode, index, delta)) { - /* - * TODO we could be a bit smarter here, we don't need to fully sort necessarily - * we could just find the place to insert linearly but the win might be minor - * compared to the added complexity - */ - weights[lowIdx] = sorter.weight(modelNodes[lowIdx]); - weights[highIdx] = sorter.weight(modelNodes[highIdx]); - sorter.sort(0, relevantNodes); - lowIdx = 0; - highIdx = relevantNodes - 1; - changed = true; - continue; - } - } + } + if (lowIdx < highIdx - 1) { + /* Shrinking the window from MIN to MAX: + * we can't move any shard from the min node, so let's move on to the next node + * and see if the threshold still holds. We either don't have any shard of this + * index on this node or allocation deciders prevent any relocation. */ + lowIdx++; + } else if (lowIdx > 0) { + /* Shrinking the window from MAX to MIN: + * now we go max to min since obviously we can't move anything to the max node, + * so let's pick the next highest */ + lowIdx = 0; + highIdx--; + } else { + /* we are done here; we either can't relocate anymore or we are balanced */ + break; } } } @@ -451,7 +426,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * to the nodes we relocated them from. */ private String[] buildWeightOrderedIndices(NodeSorter sorter) { - final String[] indices = this.indices.toArray(new String[this.indices.size()]); + final String[] indices = allocation.routingTable().indicesRouting().keys().toArray(String.class); final float[] deltas = new float[indices.length]; for (int i = 0; i < deltas.length; i++) { sorter.reset(indices[i]); @@ -503,20 +478,16 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * @return true if the allocation has changed, otherwise false */ public boolean moveShards() { - if (nodes.isEmpty()) { - /* with no nodes this is pointless */ - return false; - } - - // Create a copy of the started shards interleaving between nodes, and check if they can remain.
In the presence of throttling + // Iterate over the started shards interleaving between nodes, and check if they can remain. In the presence of throttling // shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are // offloading the shards. - List shards = new ArrayList<>(); + boolean changed = false; int index = 0; boolean found = true; + final NodeSorter sorter = newNodeSorter(); while (found) { found = false; - for (RoutingNode routingNode : routingNodes) { + for (RoutingNode routingNode : allocation.routingNodes()) { if (index >= routingNode.size()) { continue; } @@ -524,64 +495,52 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards ShardRouting shardRouting = routingNode.get(index); // we can only move started shards... if (shardRouting.started()) { - shards.add(shardRouting); + final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); + assert sourceNode != null && sourceNode.containsShard(shardRouting); + Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation); + if (decision.type() == Decision.Type.NO) { + changed |= moveShard(sorter, shardRouting, sourceNode, routingNode); + } } } index++; } - if (shards.isEmpty()) { - return false; - } - final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned(); - boolean changed = initialize(routingNodes, unassigned); - if (changed == false) { - final NodeSorter sorter = newNodeSorter(); - final ModelNode[] modelNodes = sorter.modelNodes; - for (ShardRouting shardRouting : shards) { - final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); - assert sourceNode != null && sourceNode.containsShard(shardRouting); - final RoutingNode routingNode = sourceNode.getRoutingNode(routingNodes); - Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation); - if (decision.type() == Decision.Type.NO) { - logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node()); - sorter.reset(shardRouting.getIndexName()); - /* - * the sorter holds the minimum weight node first for the shards index. - * We now walk through the nodes until we find a node to allocate the shard. - * This is not guaranteed to be balanced after this operation we still try best effort to - * allocate on the minimal eligible node. - */ - boolean moved = false; - for (ModelNode currentNode : modelNodes) { - if (currentNode == sourceNode) { - continue; - } - RoutingNode target = currentNode.getRoutingNode(routingNodes); - Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation); - Decision rebalanceDecision = allocation.deciders().canRebalance(shardRouting, allocation); - if (allocationDecision.type() == Type.YES && rebalanceDecision.type() == Type.YES) { // TODO maybe we can respect throttling here too? 
- Decision sourceDecision = sourceNode.removeShard(shardRouting); - ShardRouting targetRelocatingShard = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); - // re-add (now relocating shard) to source node - sourceNode.addShard(shardRouting, sourceDecision); - Decision targetDecision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision); - currentNode.addShard(targetRelocatingShard, targetDecision); - if (logger.isTraceEnabled()) { - logger.trace("Moved shard [{}] to node [{}]", shardRouting, routingNode.node()); - } - moved = true; - changed = true; - break; - } - } - if (moved == false) { - logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id()); + return changed; + } + + /** + * Moves a started shard to the minimal eligible node with respect to the weight function. + * + * @return true if the shard was moved successfully, otherwise false + */ + private boolean moveShard(NodeSorter sorter, ShardRouting shardRouting, ModelNode sourceNode, RoutingNode routingNode) { + logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node()); + sorter.reset(shardRouting.getIndexName()); + /* + * the sorter holds the minimum weight node first for the shard's index. + * We now walk through the nodes until we find a node to allocate the shard. + * This is not guaranteed to be balanced after this operation, but we still try our best to + * allocate on the minimal eligible node. + */ + for (ModelNode currentNode : sorter.modelNodes) { + if (currentNode != sourceNode) { + RoutingNode target = currentNode.getRoutingNode(); + Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation); + Decision rebalanceDecision = allocation.deciders().canRebalance(shardRouting, allocation); + if (allocationDecision.type() == Type.YES && rebalanceDecision.type() == Type.YES) { // TODO maybe we can respect throttling here too? + sourceNode.removeShard(shardRouting); + ShardRouting targetRelocatingShard = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); + currentNode.addShard(targetRelocatingShard); + if (logger.isTraceEnabled()) { + logger.trace("Moved shard [{}] to node [{}]", shardRouting, target.node()); + } + return true; } } } - return changed; + logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id()); + return false; } /** @@ -593,18 +552,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * on the target node which we respect during the allocation / balancing * process. In short, this method recreates the status-quo in the cluster.
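The moveShard(...) helper above boils down to "walk the weight-sorted nodes and take the first one the deciders accept". A compact sketch of that selection, with Node and the eligibility predicate as stand-ins for ModelNode and the AllocationDeciders (none of these types are real allocator API):

    import java.util.Comparator;
    import java.util.List;
    import java.util.Optional;
    import java.util.function.Predicate;

    // Sketch of moveShard()'s target selection; all types here are illustrative.
    final class MoveTargetSketch {
        static final class Node {
            final String id;
            final float weight;
            Node(String id, float weight) { this.id = id; this.weight = weight; }
        }

        static Optional<Node> pickTarget(List<Node> nodes, Node source, Predicate<Node> eligible) {
            return nodes.stream()
                    .sorted(Comparator.comparingDouble(n -> n.weight)) // minimum-weight node first
                    .filter(n -> n != source)                          // never pick the node we are leaving
                    .filter(eligible)                                  // canAllocate and canRebalance must both be YES
                    .findFirst();                                      // best effort: eligible, not necessarily balanced
        }
    }

Visiting nodes lightest-first is what makes the move best effort with respect to balance: the shard lands on the lightest node that will accept it, even though the resulting state is not guaranteed to be balanced.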
*/ - private void buildModelFromAssigned(Iterable shards) { - for (ShardRouting shard : shards) { - assert shard.assignedToNode(); - /* we skip relocating shards here since we expect an initializing shard with the same id coming in */ - if (shard.state() == RELOCATING) { - continue; - } - ModelNode node = nodes.get(shard.currentNodeId()); - assert node != null; - node.addShard(shard, Decision.single(Type.YES, "Already allocated on node", node.getNodeId())); - if (logger.isTraceEnabled()) { - logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId()); + private void buildModelFromAssigned() { + for (RoutingNode rn : routingNodes) { + ModelNode node = new ModelNode(rn); + nodes.put(rn.nodeId(), node); + for (ShardRouting shard : rn) { + assert rn.nodeId().equals(shard.currentNodeId()); + /* we skip relocating shards here since we expect an initializing shard with the same id coming in */ + if (shard.state() != RELOCATING) { + node.addShard(shard); + if (logger.isTraceEnabled()) { + logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId()); + } + } } } } @@ -612,8 +572,11 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards /** * Allocates all given shards on the minimal eligible node for the shards index * with respect to the weight function. All given shards must be unassigned. + * @return true if the current configuration has been + * changed, otherwise false */ - private boolean allocateUnassigned(RoutingNodes.UnassignedShards unassigned) { + private boolean allocateUnassigned() { + RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned(); assert !nodes.isEmpty(); if (logger.isTraceEnabled()) { logger.trace("Start allocating unassigned shards"); @@ -657,7 +620,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards int secondaryLength = 0; int primaryLength = primary.length; ArrayUtil.timSort(primary, comparator); - final Set throttledNodes = Collections.newSetFromMap(new IdentityHashMap()); + final Set throttledNodes = Collections.newSetFromMap(new IdentityHashMap<>()); do { for (int i = 0; i < primaryLength; i++) { ShardRouting shard = primary[i]; @@ -695,7 +658,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * don't check deciders */ if (currentWeight <= minWeight) { - Decision currentDecision = deciders.canAllocate(shard, node.getRoutingNode(routingNodes), allocation); + Decision currentDecision = deciders.canAllocate(shard, node.getRoutingNode(), allocation); NOUPDATE: if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) { if (currentWeight == minWeight) { @@ -736,7 +699,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } assert decision != null && minNode != null || decision == null && minNode == null; if (minNode != null) { - minNode.addShard(shard, decision); + minNode.addShard(shard); if (decision.type() == Type.YES) { if (logger.isTraceEnabled()) { logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); @@ -745,7 +708,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards changed = true; continue; // don't add to ignoreUnassigned } else { - final RoutingNode node = minNode.getRoutingNode(routingNodes); + final RoutingNode node = minNode.getRoutingNode(); if (deciders.canAllocate(node, allocation).type() != Type.YES) { if (logger.isTraceEnabled()) { logger.trace("Can not allocate on node [{}] remove from round decision [{}]", 
node, decision.type()); @@ -791,10 +754,10 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } ShardRouting candidate = null; final AllocationDeciders deciders = allocation.deciders(); - for (ShardRouting shard : index.getAllShards()) { + for (ShardRouting shard : index) { if (shard.started()) { // skip initializing, unassigned and relocating shards we can't relocate them anyway - Decision allocationDecision = deciders.canAllocate(shard, minNode.getRoutingNode(routingNodes), allocation); + Decision allocationDecision = deciders.canAllocate(shard, minNode.getRoutingNode(), allocation); Decision rebalanceDecision = deciders.canRebalance(shard, allocation); if (((allocationDecision.type() == Type.YES) || (allocationDecision.type() == Type.THROTTLE)) && ((rebalanceDecision.type() == Type.YES) || (rebalanceDecision.type() == Type.THROTTLE))) { @@ -815,24 +778,17 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } if (candidate != null) { - /* allocate on the model even if not throttled */ maxNode.removeShard(candidate); - minNode.addShard(candidate, decision); + minNode.addShard(candidate); if (decision.type() == Type.YES) { /* only allocate on the cluster if we are not throttled */ if (logger.isTraceEnabled()) { logger.trace("Relocate shard [{}] from node [{}] to node [{}]", candidate, maxNode.getNodeId(), minNode.getNodeId()); } - /* now allocate on the cluster - if we are started we need to relocate the shard */ - if (candidate.started()) { - routingNodes.relocate(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); - - } else { - routingNodes.initialize(candidate, minNode.getNodeId(), null, allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); - } + /* now allocate on the cluster */ + routingNodes.relocate(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); return true; - } } } @@ -846,14 +802,12 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } static class ModelNode implements Iterable { - private final String id; private final Map indices = new HashMap<>(); private int numShards = 0; - // lazily calculated - private RoutingNode routingNode; + private final RoutingNode routingNode; - public ModelNode(String id) { - this.id = id; + public ModelNode(RoutingNode routingNode) { + this.routingNode = routingNode; } public ModelIndex getIndex(String indexId) { @@ -861,13 +815,10 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } public String getNodeId() { - return id; + return routingNode.nodeId(); } - public RoutingNode getRoutingNode(RoutingNodes routingNodes) { - if (routingNode == null) { - routingNode = routingNodes.node(id); - } + public RoutingNode getRoutingNode() { return routingNode; } @@ -888,33 +839,31 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards return -1; } - public void addShard(ShardRouting shard, Decision decision) { + public void addShard(ShardRouting shard) { ModelIndex index = indices.get(shard.getIndexName()); if (index == null) { index = new ModelIndex(shard.getIndexName()); indices.put(index.getIndexId(), index); } - index.addShard(shard, decision); + index.addShard(shard); numShards++; } - public Decision removeShard(ShardRouting shard) { + public void removeShard(ShardRouting shard) { ModelIndex index = 
indices.get(shard.getIndexName()); - Decision removed = null; if (index != null) { - removed = index.removeShard(shard); - if (removed != null && index.numShards() == 0) { + index.removeShard(shard); + if (index.numShards() == 0) { indices.remove(shard.getIndexName()); } } numShards--; - return removed; } @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("Node(").append(id).append(")"); + sb.append("Node(").append(routingNode.nodeId()).append(")"); return sb.toString(); } @@ -930,9 +879,9 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } - static final class ModelIndex { + static final class ModelIndex implements Iterable { private final String id; - private final Map shards = new HashMap<>(); + private final Set shards = new HashSet<>(4); // expect few shards of same index to be allocated on same node private int highestPrimary = -1; public ModelIndex(String id) { @@ -942,7 +891,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards public int highestPrimary() { if (highestPrimary == -1) { int maxId = -1; - for (ShardRouting shard : shards.keySet()) { + for (ShardRouting shard : shards) { if (shard.primary()) { maxId = Math.max(maxId, shard.id()); } @@ -960,24 +909,25 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards return shards.size(); } - public Collection getAllShards() { - return shards.keySet(); + @Override + public Iterator iterator() { + return shards.iterator(); } - public Decision removeShard(ShardRouting shard) { + public void removeShard(ShardRouting shard) { highestPrimary = -1; - return shards.remove(shard); + assert shards.contains(shard) : "Shard not allocated on current node: " + shard; + shards.remove(shard); } - public void addShard(ShardRouting shard, Decision decision) { + public void addShard(ShardRouting shard) { highestPrimary = -1; - assert decision != null; - assert !shards.containsKey(shard) : "Shard already allocated on current node: " + shards.get(shard) + " " + shard; - shards.put(shard, decision); + assert !shards.contains(shard) : "Shard already allocated on current node: " + shard; + shards.add(shard); } public boolean containsShard(ShardRouting shard) { - return shards.containsKey(shard); + return shards.contains(shard); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java index 4d9c05527d3..0bf07e8cba9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java @@ -19,56 +19,25 @@ package org.elasticsearch.cluster.routing.allocation.allocator; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation; /** *

* A {@link ShardsAllocator} is the main entry point for shard allocation on nodes in the cluster. * The allocator makes basic decision where a shard instance will be allocated, if already allocated instances - * need relocate to other nodes due to node failures or due to rebalancing decisions. + * need to relocate to other nodes due to node failures or due to rebalancing decisions. *

*/ public interface ShardsAllocator { /** - * Applies changes on started nodes based on the implemented algorithm. For example if a - * shard has changed to {@link ShardRoutingState#STARTED} from {@link ShardRoutingState#RELOCATING} - * this allocator might apply some cleanups on the node that used to hold the shard. - * @param allocation all started {@link ShardRouting shards} - */ - void applyStartedShards(StartedRerouteAllocation allocation); - - /** - * Applies changes on failed nodes based on the implemented algorithm. - * @param allocation all failed {@link ShardRouting shards} - */ - void applyFailedShards(FailedRerouteAllocation allocation); - - /** - * Assign all unassigned shards to nodes + * Allocates shards to nodes in the cluster. An implementation of this method should: + * - assign unassigned shards + * - relocate shards that cannot stay on a node anymore + * - relocate shards to find a good shard balance in the cluster * * @param allocation current node allocation * @return true if the allocation has changed, otherwise false */ - boolean allocateUnassigned(RoutingAllocation allocation); - - /** - * Rebalancing number of shards on all nodes - * - * @param allocation current node allocation - * @return true if the allocation has changed, otherwise false - */ - boolean rebalance(RoutingAllocation allocation); - - /** - * Move started shards that can not be allocated to a node anymore - * - * @param allocation current node allocation - * @return true if the allocation has changed, otherwise false - */ - boolean moveShards(RoutingAllocation allocation); + boolean allocate(RoutingAllocation allocation); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocators.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocators.java deleted file mode 100644 index f3eb1ebbf14..00000000000 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocators.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster.routing.allocation.allocator; - -import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; -import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.gateway.GatewayAllocator; - -/** - * The {@link ShardsAllocator} class offers methods for allocating shard within a cluster. - * These methods include moving shards and re-balancing the cluster. 
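With the lifecycle methods gone, the whole contract fits in one entry point. The skeleton below shows what an implementation looks like under the consolidated interface; the three private helpers are illustrative names for the phases listed in the javadoc above, not methods this change adds.

    import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
    import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;

    // Skeleton of an allocator under the consolidated interface; helper names are illustrative.
    public class SketchAllocator implements ShardsAllocator {
        @Override
        public boolean allocate(RoutingAllocation allocation) {
            boolean changed = assignUnassigned(allocation);    // phase 1: place unassigned shards
            changed |= moveUnassignable(allocation);           // phase 2: move shards that cannot remain
            changed |= rebalanceIfAllowed(allocation);         // phase 3: improve the overall balance
            return changed;
        }

        private boolean assignUnassigned(RoutingAllocation allocation) { return false; }
        private boolean moveUnassignable(RoutingAllocation allocation) { return false; }
        private boolean rebalanceIfAllowed(RoutingAllocation allocation) { return false; }
    }

This is also why ShardsAllocators (deleted below) is no longer needed: its only jobs were to fan a reroute out to the three methods and to guard rebalancing against in-flight shard/store fetches, and both responsibilities now live inside the single allocate(...) implementation.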
It also allows management - * of shards by their state. - */ -public class ShardsAllocators extends AbstractComponent implements ShardsAllocator { - - private final GatewayAllocator gatewayAllocator; - private final ShardsAllocator allocator; - - public ShardsAllocators(GatewayAllocator allocator) { - this(Settings.Builder.EMPTY_SETTINGS, allocator); - } - - public ShardsAllocators(Settings settings, GatewayAllocator allocator) { - this(settings, allocator, new BalancedShardsAllocator(settings)); - } - - @Inject - public ShardsAllocators(Settings settings, GatewayAllocator gatewayAllocator, ShardsAllocator allocator) { - super(settings); - this.gatewayAllocator = gatewayAllocator; - this.allocator = allocator; - } - - @Override - public void applyStartedShards(StartedRerouteAllocation allocation) { - gatewayAllocator.applyStartedShards(allocation); - allocator.applyStartedShards(allocation); - } - - @Override - public void applyFailedShards(FailedRerouteAllocation allocation) { - gatewayAllocator.applyFailedShards(allocation); - allocator.applyFailedShards(allocation); - } - - @Override - public boolean allocateUnassigned(RoutingAllocation allocation) { - boolean changed = false; - changed |= gatewayAllocator.allocateUnassigned(allocation); - changed |= allocator.allocateUnassigned(allocation); - return changed; - } - - protected long nanoTime() { - return System.nanoTime(); - } - - @Override - public boolean rebalance(RoutingAllocation allocation) { - if (allocation.hasPendingAsyncFetch() == false) { - /* - * see https://github.com/elastic/elasticsearch/issues/14387 - * if we allow rebalance operations while we are still fetching shard store data - * we might end up with unnecessary rebalance operations which can be super confusion/frustrating - * since once the fetches come back we might just move all the shards back again. - * Therefore we only do a rebalance if we have fetched all information. - */ - return allocator.rebalance(allocation); - } else { - logger.debug("skipping rebalance due to in-flight shard/store fetches"); - return false; - } - } - - @Override - public boolean moveShards(RoutingAllocation allocation) { - return allocator.moveShards(allocation); - } -} diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java index 5ccd9e9bb63..f4b1be19af4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java @@ -112,7 +112,7 @@ public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocation "allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss. 
Please confirm by setting the accept_data_loss parameter to true"); } - final IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndexName()); + final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); if (shardRouting.allocatedPostIndexCreate(indexMetaData) == false) { return explainOrThrowRejectedCommand(explain, allocation, "trying to allocate an existing primary shard [" + index + "][" + shardId + "], while no such shard has ever been active"); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 9859a9b6584..227ec277469 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.HashMap; @@ -77,8 +78,11 @@ public class AwarenessAllocationDecider extends AllocationDecider { public static final String NAME = "awareness"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = + new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , Property.Dynamic, + Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = + Setting.groupSetting("cluster.routing.allocation.awareness.force.", Property.Dynamic, Property.NodeScope); private String[] awarenessAttributes; @@ -149,7 +153,7 @@ public class AwarenessAllocationDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "no allocation awareness enabled"); } - IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.index()); + IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); int shardCount = indexMetaData.getNumberOfReplicas() + 1; // 1 for primary for (String awarenessAttribute : awarenessAttributes) { // the node the shard exists on must be associated with an awareness attribute diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index 11fce397b26..84e974aceb0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import 
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.Locale; @@ -48,7 +49,9 @@ import java.util.Locale; public class ClusterRebalanceAllocationDecider extends AllocationDecider { public static final String NAME = "cluster_rebalance"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = + new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), + ClusterRebalanceType::parseString, Property.Dynamic, Property.NodeScope); /** * An enum representation for the configured re-balance type. diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index d39b9604066..fe6bf918dc2 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -42,7 +43,9 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { public static final String NAME = "concurrent_rebalance"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1, true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = + Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1, + Property.Dynamic, Property.NodeScope); private volatile int clusterConcurrentRebalance; @Inject diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 821fa55d704..e2124558f2d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.RatioValue; @@ -81,11 +82,22 @@ public 
class DiskThresholdDecider extends AllocationDecider { private volatile boolean enabled; private volatile TimeValue rerouteInterval; - public static final Setting CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.CLUSTER);; - public static final Setting CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = + Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = + new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", + (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), + Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = + new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", + (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), + Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = + Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, + Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = + Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), + Property.Dynamic, Property.NodeScope); /** * Listens for a node to go over the high watermark and kicks off an empty @@ -330,7 +342,7 @@ public class DiskThresholdDecider extends AllocationDecider { } // a flag for whether the primary shard has been previously allocated - IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndexName()); + IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData); // checks for exact byte comparisons diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 9131355876b..0b69ba2a19e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++
b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.Locale; @@ -60,11 +61,19 @@ public class EnableAllocationDecider extends AllocationDecider { public static final String NAME = "enable"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.CLUSTER); - public static final Setting INDEX_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("index.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.INDEX); + public static final Setting CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = + new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, + Property.Dynamic, Property.NodeScope); + public static final Setting INDEX_ROUTING_ALLOCATION_ENABLE_SETTING = + new Setting<>("index.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, + Property.Dynamic, Property.IndexScope); - public static final Setting CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.CLUSTER); - public static final Setting INDEX_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("index.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.INDEX); + public static final Setting CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = + new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, + Property.Dynamic, Property.NodeScope); + public static final Setting INDEX_ROUTING_REBALANCE_ENABLE_SETTING = + new Setting<>("index.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, + Property.Dynamic, Property.IndexScope); private volatile Rebalance enableRebalance; private volatile Allocation enableAllocation; @@ -92,7 +101,7 @@ public class EnableAllocationDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored"); } - final IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndexName()); + final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); final Allocation enable; if (INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.exists(indexMetaData.getSettings())) { enable = INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.get(indexMetaData.getSettings()); @@ -127,7 +136,7 @@ public class EnableAllocationDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "rebalance disabling is ignored"); } - Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings(); + Settings indexSettings = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()).getSettings(); final Rebalance enable; if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) { enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java 
b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index f8ff5f37aed..d1aa0d8b583 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND; @@ -60,9 +61,12 @@ public class FilterAllocationDecider extends AllocationDecider { public static final String NAME = "filter"; - public static final Setting CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = + Setting.groupSetting("cluster.routing.allocation.require.", Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = + Setting.groupSetting("cluster.routing.allocation.include.", Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = + Setting.groupSetting("cluster.routing.allocation.exclude.", Property.Dynamic, Property.NodeScope); private volatile DiscoveryNodeFilters clusterRequireFilters; private volatile DiscoveryNodeFilters clusterIncludeFilters; @@ -98,7 +102,7 @@ public class FilterAllocationDecider extends AllocationDecider { Decision decision = shouldClusterFilter(node, allocation); if (decision != null) return decision; - decision = shouldIndexFilter(allocation.routingNodes().metaData().index(shardRouting.index()), node, allocation); + decision = shouldIndexFilter(allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()), node, allocation); if (decision != null) return decision; return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters"); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index e766b4c49aa..04247525f1d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -59,13 +60,17 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { * Controls the maximum number 
of shards per index on a single Elasticsearch * node. Negative values are interpreted as unlimited. */ - public static final Setting INDEX_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("index.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.INDEX); + public static final Setting INDEX_TOTAL_SHARDS_PER_NODE_SETTING = + Setting.intSetting("index.routing.allocation.total_shards_per_node", -1, -1, + Property.Dynamic, Property.IndexScope); /** * Controls the maximum number of shards per node on a global level. * Negative values are interpreted as unlimited. */ - public static final Setting CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = + Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1, + Property.Dynamic, Property.NodeScope); @Inject @@ -81,7 +86,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { - IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index()); + IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()); final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings); // Capture the limit here in case it changes during this method's // execution @@ -118,7 +123,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { @Override public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { - IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index()); + IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()); final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings); // Capture the limit here in case it changes during this method's // execution diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index cf889cde6ad..d656afc8036 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -39,7 +40,9 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { /** * Disables relocation of shards that are currently being snapshotted. 
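The same mechanical rewrite recurs in every decider touched by this patch, so one instance side by side is worth spelling out: the trailing boolean (dynamically updatable or not) plus the Setting.Scope enum become explicit varargs Property values. The setting name below is made up for illustration; the factory method is the same Setting.boolSetting already used in the hunks above.

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;

    // One instance of the migration applied across the deciders; the name is illustrative.
    final class SettingMigrationSketch {
        // Before this PR: trailing boolean = dynamic, Scope enum = where the setting applies.
        //   Setting.boolSetting("cluster.example.flag", true, true, Setting.Scope.CLUSTER);

        // After this PR: both facts are spelled out as Property values.
        static final Setting<Boolean> EXAMPLE_FLAG =
            Setting.boolSetting("cluster.example.flag", true, Property.Dynamic, Property.NodeScope);
    }

Index-scoped settings take Property.IndexScope instead of Property.NodeScope, as the EnableAllocationDecider hunk above shows.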
*/ - public static final Setting CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = + Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, + Property.Dynamic, Property.NodeScope); private volatile boolean enableRelocation = false; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 25f43f57610..ca6b312da4c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -50,10 +51,25 @@ public class ThrottlingAllocationDecider extends AllocationDecider { public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2; public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4; public static final String NAME = "throttling"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_incoming_recoveries", (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_incoming_recoveries"), true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_outgoing_recoveries", (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_outgoing_recoveries"), true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = + new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", + Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES), + (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), + Property.Dynamic, Property.NodeScope); + public static final Setting 
CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = + Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", + DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, + Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING = + new Setting<>("cluster.routing.allocation.node_concurrent_incoming_recoveries", + (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), + (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_incoming_recoveries"), + Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING = + new Setting<>("cluster.routing.allocation.node_concurrent_outgoing_recoveries", + (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), + (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_outgoing_recoveries"), + Property.Dynamic, Property.NodeScope); private volatile int primariesInitialRecoveries; diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java similarity index 75% rename from core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java rename to core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index 3d70ac84e33..fa9b3492685 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -19,11 +19,9 @@ package org.elasticsearch.cluster.service; -import org.elasticsearch.Version; import org.elasticsearch.cluster.AckedClusterStateTaskListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState.Builder; import org.elasticsearch.cluster.ClusterStateListener; @@ -32,19 +30,18 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.LocalNodeMasterListener; +import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.TimeoutClusterStateListener; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -52,9 +49,9 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import 
org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.CountDown; @@ -65,9 +62,7 @@ import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.PrioritizedRunnable; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.Collection; @@ -78,8 +73,6 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Queue; -import java.util.Random; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.Executor; import java.util.concurrent.Future; @@ -94,28 +87,20 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF /** * */ -public class InternalClusterService extends AbstractLifecycleComponent implements ClusterService { +public class ClusterService extends AbstractLifecycleComponent { - public static final Setting CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = + Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), + Property.Dynamic, Property.NodeScope); public static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; - public static final Setting NODE_ID_SEED_SETTING = - // don't use node.id.seed so it won't be seen as an attribute - Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER); private final ThreadPool threadPool; private BiConsumer clusterStatePublisher; private final OperationRouting operationRouting; - private final TransportService transportService; - private final ClusterSettings clusterSettings; - private final DiscoveryNodeService discoveryNodeService; - private final Version version; - - private final TimeValue reconnectInterval; private TimeValue slowTaskLoggingThreshold; @@ -130,7 +115,8 @@ public class InternalClusterService extends AbstractLifecycleComponent> updateTasksPerExecutor = new HashMap<>(); // TODO this is rather frequently changing I guess a Synced Set would be better here and a dedicated remove API private final Collection postAppliedListeners = new CopyOnWriteArrayList<>(); - private final Iterable preAppliedListeners = Iterables.concat(priorityClusterStateListeners, clusterStateListeners, lastClusterStateListeners); + private final Iterable preAppliedListeners = Iterables.concat(priorityClusterStateListeners, + clusterStateListeners, lastClusterStateListeners); private final LocalNodeMasterListeners localNodeMasterListeners; @@ -140,60 +126,69 @@ 
public class InternalClusterService extends AbstractLifecycleComponent publisher) { + synchronized public void setClusterStatePublisher(BiConsumer publisher) { clusterStatePublisher = publisher; } - @Override - public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException { + synchronized public void setLocalNode(DiscoveryNode localNode) { + assert clusterState.nodes().localNodeId() == null : "local node is already set"; + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.id()); + this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); + } + + synchronized public void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) { + assert this.nodeConnectionsService == null : "nodeConnectionsService is already set"; + this.nodeConnectionsService = nodeConnectionsService; + } + + /** + * Adds an initial block to be set on the first cluster state created. + */ + synchronized public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException { if (lifecycle.started()) { throw new IllegalStateException("can't set initial block when started"); } initialBlocks.addGlobalBlock(block); } - @Override - public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException { + /** + * Removes an initial block that was to be set on the first cluster state created. + */ + synchronized public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException { removeInitialStateBlock(block.id()); } - @Override - public void removeInitialStateBlock(int blockId) throws IllegalStateException { + /** + * Removes an initial block that was to be set on the first cluster state created. + */ + synchronized public void removeInitialStateBlock(int blockId) throws IllegalStateException { if (lifecycle.started()) { throw new IllegalStateException("can't set initial block when started"); } @@ -201,69 +196,77 @@ public class InternalClusterService extends AbstractLifecycleComponent nodeAttributes = discoveryNodeService.buildAttributes(); - // note, we rely on the fact that its a new id each time we start, see FD and "kill -9" handling - final String nodeId = generateNodeId(settings); - final TransportAddress publishAddress = transportService.boundAddress().publishAddress(); - DiscoveryNode localNode = new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, nodeAttributes, version); - DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id()); - this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).blocks(initialBlocks).build(); - this.transportService.setLocalNode(localNode); + this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), + threadPool.getThreadContext()); + this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build(); } @Override - protected void doStop() { - FutureUtils.cancel(this.reconnectToNodes); + synchronized protected void doStop() { for (NotifyTimeout onGoingTimeout : onGoingTimeouts) { - onGoingTimeout.cancel(); - onGoingTimeout.listener.onClose(); + try { + onGoingTimeout.cancel(); + onGoingTimeout.listener.onClose(); + } catch (Exception ex) { + logger.debug("failed to notify listeners on shutdown", ex); + } } ThreadPool.terminate(updateTasksExecutor, 10, TimeUnit.SECONDS); remove(localNodeMasterListeners); } @Override - protected void doClose() { + synchronized protected
void doClose() { } - @Override + /** + * The local node. + */ public DiscoveryNode localNode() { return clusterState.getNodes().localNode(); } - @Override public OperationRouting operationRouting() { return operationRouting; } - @Override + /** + * The current state. + */ public ClusterState state() { return this.clusterState; } - @Override + /** + * Adds a priority listener for updated cluster states. + */ public void addFirst(ClusterStateListener listener) { priorityClusterStateListeners.add(listener); } - @Override + /** + * Adds last listener. + */ public void addLast(ClusterStateListener listener) { lastClusterStateListeners.add(listener); } - @Override + /** + * Adds a listener for updated cluster states. + */ public void add(ClusterStateListener listener) { clusterStateListeners.add(listener); } - @Override + /** + * Removes a listener for updated cluster states. + */ public void remove(ClusterStateListener listener) { clusterStateListeners.remove(listener); priorityClusterStateListeners.remove(listener); @@ -278,17 +281,27 @@ public class InternalClusterService extends AbstractLifecycleComponent the type of the cluster state update task state + */ public void submitStateUpdateTask(final String source, final T task, final ClusterStateTaskConfig config, final ClusterStateTaskExecutor executor, @@ -333,9 +370,9 @@ public class InternalClusterService extends AbstractLifecycleComponent void innerSubmitStateUpdateTask(final String source, final T task, - final ClusterStateTaskConfig config, - final ClusterStateTaskExecutor executor, - final SafeClusterStateTaskListener listener) { + final ClusterStateTaskConfig config, + final ClusterStateTaskExecutor executor, + final SafeClusterStateTaskListener listener) { if (!lifecycle.started()) { return; } @@ -351,7 +388,8 @@ public class InternalClusterService extends AbstractLifecycleComponent pendingTasks() { PrioritizedEsThreadPoolExecutor.Pending[] pendings = updateTasksExecutor.getPending(); List pendingClusterTasks = new ArrayList<>(pendings.length); @@ -385,29 +425,32 @@ public class InternalClusterService extends AbstractLifecycleComponent batchResult; - long startTimeNS = System.nanoTime(); + long startTimeNS = currentTimeInNanos(); try { List inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); batchResult = executor.execute(previousClusterState, inputs); } catch (Throwable e) { - TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); + TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); if (logger.isTraceEnabled()) { - StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n"); - sb.append(previousClusterState.nodes().prettyPrint()); - sb.append(previousClusterState.routingTable().prettyPrint()); - sb.append(previousClusterState.getRoutingNodes().prettyPrint()); - logger.trace(sb.toString(), e); + logger.trace("failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", e, executionTime, + previousClusterState.version(), source, previousClusterState.nodes().prettyPrint(), + previousClusterState.routingTable().prettyPrint(), previousClusterState.getRoutingNodes().prettyPrint()); } warnAboutSlowTaskIfNeeded(executionTime, source); - batchResult = 
ClusterStateTaskExecutor.BatchResult.builder().failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e).build(previousClusterState); + batchResult = ClusterStateTaskExecutor.BatchResult.builder() + .failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e) + .build(previousClusterState); } assert batchResult.executionResults != null; assert batchResult.executionResults.size() == toExecute.size() - : String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", toExecute.size(), toExecute.size() == 1 ? "" : "s", batchResult.executionResults.size()); + : String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", toExecute.size(), + toExecute.size() == 1 ? "" : "s", batchResult.executionResults.size()); boolean assertsEnabled = false; assert (assertsEnabled = true); if (assertsEnabled) { @@ -493,11 +537,11 @@ public class InternalClusterService extends AbstractLifecycleComponent proccessedListeners.add(updateTask), - ex -> { - logger.debug("cluster state update task [{}] failed", ex, updateTask.source); - updateTask.listener.onFailure(updateTask.source, ex); - } + () -> proccessedListeners.add(updateTask), + ex -> { + logger.debug("cluster state update task [{}] failed", ex, updateTask.source); + updateTask.listener.onFailure(updateTask.source, ex); + } ); } @@ -509,8 +553,8 @@ public class InternalClusterService extends AbstractLifecycleComponent executor, ClusterStateTaskListener listener) { + UpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor executor, + ClusterStateTaskListener listener) { super(config.priority(), source); this.task = task; this.config = config; @@ -777,7 +811,8 @@ public class InternalClusterService extends AbstractLifecycleComponent slowTaskLoggingThreshold.getMillis()) { - logger.warn("cluster state update task [{}] took {} above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold); + logger.warn("cluster state update task [{}] took [{}] above the warn threshold of {}", source, executionTime, + slowTaskLoggingThreshold); } } @@ -809,64 +844,6 @@ public class InternalClusterService extends AbstractLifecycleComponent failureCount = ConcurrentCollections.newConcurrentMap(); - - @Override - public void run() { - // master node will check against all nodes if its alive with certain discoveries implementations, - // but we can't rely on that, so we check on it as well - for (DiscoveryNode node : clusterState.nodes()) { - if (lifecycle.stoppedOrClosed()) { - return; - } - if (clusterState.nodes().nodeExists(node.id())) { // we double check existence of node since connectToNode might take time... - if (!transportService.nodeConnected(node)) { - try { - transportService.connectToNode(node); - } catch (Exception e) { - if (lifecycle.stoppedOrClosed()) { - return; - } - if (clusterState.nodes().nodeExists(node.id())) { // double check here as well, maybe its gone? - Integer nodeFailureCount = failureCount.get(node); - if (nodeFailureCount == null) { - nodeFailureCount = 1; - } else { - nodeFailureCount = nodeFailureCount + 1; - } - // log every 6th failure - if ((nodeFailureCount % 6) == 0) { - // reset the failure count... 
- nodeFailureCount = 0; - logger.warn("failed to reconnect to node {}", e, node); - } - failureCount.put(node, nodeFailureCount); - } - } - } - } - } - // go over and remove failed nodes that have been removed - DiscoveryNodes nodes = clusterState.nodes(); - for (Iterator failedNodesIt = failureCount.keySet().iterator(); failedNodesIt.hasNext(); ) { - DiscoveryNode failedNode = failedNodesIt.next(); - if (!nodes.nodeExists(failedNode.id())) { - failedNodesIt.remove(); - } - } - if (lifecycle.started()) { - reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this); - } - } - } - - public static String generateNodeId(Settings settings) { - Random random = Randomness.get(settings, NODE_ID_SEED_SETTING); - return Strings.randomBase64UUID(random); - } - private static class LocalNodeMasterListeners implements ClusterStateListener { private final List listeners = new CopyOnWriteArrayList<>(); @@ -970,7 +947,8 @@ public class InternalClusterService extends AbstractLifecycleComponent ackTimeoutCallback; private Throwable lastFailure; - AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) { + AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes, + ThreadPool threadPool) { this.ackedTaskListener = ackedTaskListener; this.clusterStateVersion = clusterStateVersion; this.nodes = nodes; diff --git a/core/src/main/java/org/elasticsearch/common/ParseField.java b/core/src/main/java/org/elasticsearch/common/ParseField.java index 0aad723e6fb..a0978723d0e 100644 --- a/core/src/main/java/org/elasticsearch/common/ParseField.java +++ b/core/src/main/java/org/elasticsearch/common/ParseField.java @@ -18,26 +18,23 @@ */ package org.elasticsearch.common; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; -import java.util.EnumSet; import java.util.HashSet; /** * Holds a field that can be found in a request while parsing and its different variants, which may be deprecated. */ public class ParseField { + + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ParseField.class)); + private final String camelCaseName; private final String underscoreName; private final String[] deprecatedNames; private String allReplacedWith = null; - static final EnumSet EMPTY_FLAGS = EnumSet.noneOf(Flag.class); - static final EnumSet STRICT_FLAGS = EnumSet.of(Flag.STRICT); - - enum Flag { - STRICT - } - public ParseField(String value, String... 
deprecatedNames) { camelCaseName = Strings.toCamelCase(value); underscoreName = Strings.toUnderscoreCase(value); @@ -80,19 +77,21 @@ public class ParseField { return parseField; } - boolean match(String currentFieldName, EnumSet flags) { + boolean match(String currentFieldName, boolean strict) { if (allReplacedWith == null && (currentFieldName.equals(camelCaseName) || currentFieldName.equals(underscoreName))) { return true; } String msg; for (String depName : deprecatedNames) { if (currentFieldName.equals(depName)) { - if (flags.contains(Flag.STRICT)) { - msg = "Deprecated field [" + currentFieldName + "] used, expected [" + underscoreName + "] instead"; - if (allReplacedWith != null) { - msg = "Deprecated field [" + currentFieldName + "] used, replaced by [" + allReplacedWith + "]"; - } + msg = "Deprecated field [" + currentFieldName + "] used, expected [" + underscoreName + "] instead"; + if (allReplacedWith != null) { + msg = "Deprecated field [" + currentFieldName + "] used, replaced by [" + allReplacedWith + "]"; + } + if (strict) { throw new IllegalArgumentException(msg); + } else { + DEPRECATION_LOGGER.deprecated(msg); } return true; } diff --git a/core/src/main/java/org/elasticsearch/common/ParseFieldMatcher.java b/core/src/main/java/org/elasticsearch/common/ParseFieldMatcher.java index 137e5b4a966..9866694a230 100644 --- a/core/src/main/java/org/elasticsearch/common/ParseFieldMatcher.java +++ b/core/src/main/java/org/elasticsearch/common/ParseFieldMatcher.java @@ -21,29 +21,28 @@ package org.elasticsearch.common; import org.elasticsearch.common.settings.Settings; -import java.util.EnumSet; - /** * Matcher to use in combination with {@link ParseField} while parsing requests. Matches a {@link ParseField} * against a field name and throws a deprecation exception depending on the current value of the {@link #PARSE_STRICT} setting. */ public class ParseFieldMatcher { public static final String PARSE_STRICT = "index.query.parse.strict"; - public static final ParseFieldMatcher EMPTY = new ParseFieldMatcher(ParseField.EMPTY_FLAGS); - public static final ParseFieldMatcher STRICT = new ParseFieldMatcher(ParseField.STRICT_FLAGS); + public static final ParseFieldMatcher EMPTY = new ParseFieldMatcher(false); + public static final ParseFieldMatcher STRICT = new ParseFieldMatcher(true); - private final EnumSet parseFlags; + private final boolean strict; public ParseFieldMatcher(Settings settings) { - if (settings.getAsBoolean(PARSE_STRICT, false)) { - this.parseFlags = EnumSet.of(ParseField.Flag.STRICT); - } else { - this.parseFlags = ParseField.EMPTY_FLAGS; - } + this(settings.getAsBoolean(PARSE_STRICT, false)); } - public ParseFieldMatcher(EnumSet parseFlags) { - this.parseFlags = parseFlags; + public ParseFieldMatcher(boolean strict) { + this.strict = strict; + } + + /** Should deprecated settings be rejected?
*/ + public boolean isStrict() { + return strict; } /** @@ -55,6 +54,6 @@ public class ParseFieldMatcher { * @return true whenever the parse field that we are looking for was found, false otherwise */ public boolean match(String fieldName, ParseField parseField) { - return parseField.match(fieldName, parseFlags); + return parseField.match(fieldName, strict); } } diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorModule.java b/core/src/main/java/org/elasticsearch/common/SuppressLoggerChecks.java similarity index 63% rename from core/src/main/java/org/elasticsearch/percolator/PercolatorModule.java rename to core/src/main/java/org/elasticsearch/common/SuppressLoggerChecks.java index 68b8db55e31..c6f23f72f96 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolatorModule.java +++ b/core/src/main/java/org/elasticsearch/common/SuppressLoggerChecks.java @@ -16,18 +16,18 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.percolator; +package org.elasticsearch.common; -import org.elasticsearch.common.inject.AbstractModule; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; /** - * + * Annotation to suppress logging usage checks errors inside a whole class or a method. */ -public class PercolatorModule extends AbstractModule { - - @Override - protected void configure() { - bind(PercolateDocumentParser.class).asEagerSingleton(); - bind(PercolatorService.class).asEagerSingleton(); - } +@Retention(RetentionPolicy.CLASS) +@Target({ ElementType.CONSTRUCTOR, ElementType.METHOD, ElementType.TYPE }) +public @interface SuppressLoggerChecks { + String reason(); } diff --git a/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java b/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java index 0a0be5eb163..e6c67dbe9ba 100644 --- a/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java +++ b/core/src/main/java/org/elasticsearch/common/breaker/ChildMemoryCircuitBreaker.java @@ -93,7 +93,7 @@ public class ChildMemoryCircuitBreaker implements CircuitBreaker { final String message = "[" + this.name + "] Data too large, data for [" + fieldName + "] would be larger than limit of [" + memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]"; - logger.debug(message); + logger.debug("{}", message); throw new CircuitBreakingException(message, bytesNeeded, this.memoryBytesLimit); } diff --git a/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java b/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java index b069456b5d4..3a5297b277b 100644 --- a/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java +++ b/core/src/main/java/org/elasticsearch/common/breaker/MemoryCircuitBreaker.java @@ -81,7 +81,7 @@ public class MemoryCircuitBreaker implements CircuitBreaker { this.trippedCount.incrementAndGet(); final String message = "Data too large, data for field [" + fieldName + "] would be larger than limit of [" + memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]"; - logger.debug(message); + logger.debug("{}", message); throw new CircuitBreakingException(message); } diff --git a/core/src/main/java/org/elasticsearch/common/cli/CheckFileCommand.java b/core/src/main/java/org/elasticsearch/common/cli/CheckFileCommand.java deleted file mode 100644 index 
e2fcbe89df8..00000000000 --- a/core/src/main/java/org/elasticsearch/common/cli/CheckFileCommand.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.cli; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.attribute.PosixFileAttributeView; -import java.nio.file.attribute.PosixFileAttributes; -import java.nio.file.attribute.PosixFilePermission; -import java.nio.file.attribute.PosixFilePermissions; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; - -/** - * A helper command that checks if configured paths have been changed when running a CLI command. - * It is only executed in case of specified paths by the command and if the paths underlying filesystem - * supports posix permissions. - * - * If this is the case, a warn message is issued whenever an owner, a group or the file permissions is changed by - * the command being executed and not configured back to its prior state, which should be the task of the command - * being executed. 
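What the deleted CheckFileCommand did reduces to a snapshot-and-compare of POSIX file attributes around the wrapped command. A minimal sketch of that pattern (the method name and the Runnable wrapper are assumed for illustration; this is not the removed class):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermissions;

static void warnIfPermissionsChanged(Path path, Runnable command) throws IOException {
    // snapshot the attributes before the command runs
    PosixFileAttributes before = Files.readAttributes(path, PosixFileAttributes.class);
    command.run();
    // compare afterwards; warn rather than fail, as the deleted class did
    PosixFileAttributes after = Files.readAttributes(path, PosixFileAttributes.class);
    if (!before.permissions().equals(after.permissions())) {
        System.err.println("WARNING: The file permissions of [" + path + "] have changed from ["
                + PosixFilePermissions.toString(before.permissions()) + "] to ["
                + PosixFilePermissions.toString(after.permissions()) + "]");
    }
}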
- * - */ -public abstract class CheckFileCommand extends CliTool.Command { - - public CheckFileCommand(Terminal terminal) { - super(terminal); - } - - /** - * abstract method, which should implement the same logic as CliTool.Command.execute(), but is wrapped - */ - public abstract CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception; - - /** - * Returns the array of paths, that should be checked if the permissions, user or groups have changed - * before and after execution of the command - * - */ - protected abstract Path[] pathsForPermissionsCheck(Settings settings, Environment env) throws Exception; - - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - Path[] paths = pathsForPermissionsCheck(settings, env); - - if (paths == null || paths.length == 0) { - return doExecute(settings, env); - } - - Map> permissions = new HashMap<>(paths.length); - Map owners = new HashMap<>(paths.length); - Map groups = new HashMap<>(paths.length); - - if (paths != null && paths.length > 0) { - for (Path path : paths) { - try { - boolean supportsPosixPermissions = Environment.getFileStore(path).supportsFileAttributeView(PosixFileAttributeView.class); - if (supportsPosixPermissions) { - PosixFileAttributes attributes = Files.readAttributes(path, PosixFileAttributes.class); - permissions.put(path, attributes.permissions()); - owners.put(path, attributes.owner().getName()); - groups.put(path, attributes.group().getName()); - } - } catch (IOException e) { - // silently swallow if not supported, no need to log things - } - } - } - - CliTool.ExitStatus status = doExecute(settings, env); - - // check if permissions differ - for (Map.Entry> entry : permissions.entrySet()) { - if (!Files.exists(entry.getKey())) { - continue; - } - - Set permissionsBeforeWrite = entry.getValue(); - Set permissionsAfterWrite = Files.getPosixFilePermissions(entry.getKey()); - if (!permissionsBeforeWrite.equals(permissionsAfterWrite)) { - terminal.println(Terminal.Verbosity.SILENT, "WARNING: The file permissions of [" + entry.getKey() + "] have changed " - + "from [" + PosixFilePermissions.toString(permissionsBeforeWrite) + "] " - + "to [" + PosixFilePermissions.toString(permissionsAfterWrite) + "]"); - terminal.println(Terminal.Verbosity.SILENT, "Please ensure that the user account running Elasticsearch has read access to this file!"); - } - } - - // check if owner differs - for (Map.Entry entry : owners.entrySet()) { - if (!Files.exists(entry.getKey())) { - continue; - } - - String ownerBeforeWrite = entry.getValue(); - String ownerAfterWrite = Files.getOwner(entry.getKey()).getName(); - if (!ownerAfterWrite.equals(ownerBeforeWrite)) { - terminal.println(Terminal.Verbosity.SILENT, "WARNING: Owner of file [" + entry.getKey() + "] used to be [" + ownerBeforeWrite + "], but now is [" + ownerAfterWrite + "]"); - } - } - - // check if group differs - for (Map.Entry entry : groups.entrySet()) { - if (!Files.exists(entry.getKey())) { - continue; - } - - String groupBeforeWrite = entry.getValue(); - String groupAfterWrite = Files.readAttributes(entry.getKey(), PosixFileAttributes.class).group().getName(); - if (!groupAfterWrite.equals(groupBeforeWrite)) { - terminal.println(Terminal.Verbosity.SILENT, "WARNING: Group of file [" + entry.getKey() + "] used to be [" + groupBeforeWrite + "], but now is [" + groupAfterWrite + "]"); - } - } - - return status; - } -} diff --git a/core/src/main/java/org/elasticsearch/common/cli/CliTool.java 
b/core/src/main/java/org/elasticsearch/common/cli/CliTool.java deleted file mode 100644 index 2ea01f45068..00000000000 --- a/core/src/main/java/org/elasticsearch/common/cli/CliTool.java +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.cli; - -import org.apache.commons.cli.AlreadySelectedException; -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.CommandLineParser; -import org.apache.commons.cli.DefaultParser; -import org.apache.commons.cli.MissingArgumentException; -import org.apache.commons.cli.MissingOptionException; -import org.apache.commons.cli.UnrecognizedOptionException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.internal.InternalSettingsPreparer; - -import java.util.Locale; - -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; - -/** - * A base class for command-line interface tool. - * - * Two modes are supported: - * - * - Single command mode. The tool exposes a single command that can potentially accept arguments (eg. CLI options). - * - Multi command mode. The tool support multiple commands, each for different tasks, each potentially accepts arguments. - * - * In a multi-command mode. The first argument must be the command name. For example, the plugin manager - * can be seen as a multi-command tool with two possible commands: install and uninstall - * - * The tool is configured using a {@link CliToolConfig} which encapsulates the tool's commands and their - * potential options. The tool also comes with out of the box simple help support (the -h/--help option is - * automatically handled) where the help text is configured in a dedicated *.help files located in the same package - * as the tool. 
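The dispatch contract this javadoc describes is small enough to sketch directly; assuming Function-based commands standing in for CliTool.Command (names hypothetical):

import java.util.Arrays;
import java.util.Map;
import java.util.function.Function;

static int dispatch(Map<String, Function<String[], Integer>> commands, String[] args) {
    if (args.length == 0) {
        System.err.println("ERROR: command not specified");
        return 64; // ExitStatus.USAGE
    }
    Function<String[], Integer> command = commands.get(args[0]);
    if (command == null) {
        System.err.println("ERROR: unknown command [" + args[0] + "]");
        return 64;
    }
    // strip the command name and hand the remaining arguments to the command,
    // as the deleted execute(String... args) did
    return command.apply(Arrays.copyOfRange(args, 1, args.length));
}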
- */ -public abstract class CliTool { - - // based on sysexits.h - public enum ExitStatus { - OK(0), - OK_AND_EXIT(0), - USAGE(64), /* command line usage error */ - DATA_ERROR(65), /* data format error */ - NO_INPUT(66), /* cannot open input */ - NO_USER(67), /* addressee unknown */ - NO_HOST(68), /* host name unknown */ - UNAVAILABLE(69), /* service unavailable */ - CODE_ERROR(70), /* internal software error */ - CANT_CREATE(73), /* can't create (user) output file */ - IO_ERROR(74), /* input/output error */ - TEMP_FAILURE(75), /* temp failure; user is invited to retry */ - PROTOCOL(76), /* remote error in protocol */ - NOPERM(77), /* permission denied */ - CONFIG(78); /* configuration error */ - - final int status; - - ExitStatus(int status) { - this.status = status; - } - - public int status() { - return status; - } - } - - protected final Terminal terminal; - protected final Environment env; - protected final Settings settings; - - private final CliToolConfig config; - - protected CliTool(CliToolConfig config) { - this(config, Terminal.DEFAULT); - } - - protected CliTool(CliToolConfig config, Terminal terminal) { - if (config.cmds().size() == 0) { - throw new IllegalArgumentException("At least one command must be configured"); - } - this.config = config; - this.terminal = terminal; - env = InternalSettingsPreparer.prepareEnvironment(EMPTY_SETTINGS, terminal); - settings = env.settings(); - } - - public final ExitStatus execute(String... args) throws Exception { - - // first lets see if the user requests tool help. We're doing it only if - // this is a multi-command tool. If it's a single command tool, the -h/--help - // option will be taken care of on the command level - if (!config.isSingle() && args.length > 0 && (args[0].equals("-h") || args[0].equals("--help"))) { - config.printUsage(terminal); - return ExitStatus.OK_AND_EXIT; - } - - CliToolConfig.Cmd cmd; - if (config.isSingle()) { - cmd = config.single(); - } else { - - if (args.length == 0) { - terminal.println(Terminal.Verbosity.SILENT, "ERROR: command not specified"); - config.printUsage(terminal); - return ExitStatus.USAGE; - } - - String cmdName = args[0]; - cmd = config.cmd(cmdName); - if (cmd == null) { - terminal.println(Terminal.Verbosity.SILENT, "ERROR: unknown command [" + cmdName + "]. 
Use [-h] option to list available commands"); - return ExitStatus.USAGE; - } - - // we now remove the command name from the args - if (args.length == 1) { - args = new String[0]; - } else { - String[] cmdArgs = new String[args.length - 1]; - System.arraycopy(args, 1, cmdArgs, 0, cmdArgs.length); - args = cmdArgs; - } - } - - try { - return parse(cmd, args).execute(settings, env); - } catch (UserError error) { - terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + error.getMessage()); - return error.exitStatus; - } - } - - public Command parse(String cmdName, String[] args) throws Exception { - CliToolConfig.Cmd cmd = config.cmd(cmdName); - return parse(cmd, args); - } - - public Command parse(CliToolConfig.Cmd cmd, String[] args) throws Exception { - CommandLineParser parser = new DefaultParser(); - CommandLine cli = parser.parse(CliToolConfig.OptionsSource.HELP.options(), args, true); - if (cli.hasOption("h")) { - return helpCmd(cmd); - } - try { - cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption()); - } catch (AlreadySelectedException|MissingArgumentException|MissingOptionException|UnrecognizedOptionException e) { - // intentionally drop the stack trace here as these are really user errors, - // the stack trace into cli parsing lib is not important - throw new UserError(ExitStatus.USAGE, e.toString()); - } - - if (cli.hasOption("v")) { - terminal.setVerbosity(Terminal.Verbosity.VERBOSE); - } else if (cli.hasOption("s")) { - terminal.setVerbosity(Terminal.Verbosity.SILENT); - } else { - terminal.setVerbosity(Terminal.Verbosity.NORMAL); - } - return parse(cmd.name(), cli); - } - - protected Command.Help helpCmd(CliToolConfig.Cmd cmd) { - return new Command.Help(cmd, terminal); - } - - protected static Command.Exit exitCmd(ExitStatus status) { - return new Command.Exit(null, status, null); - } - - protected static Command.Exit exitCmd(ExitStatus status, Terminal terminal, String msg, Object... 
args) { - return new Command.Exit(String.format(Locale.ROOT, msg, args), status, terminal); - } - - protected abstract Command parse(String cmdName, CommandLine cli) throws Exception; - - public static abstract class Command { - - protected final Terminal terminal; - - protected Command(Terminal terminal) { - this.terminal = terminal; - } - - public abstract ExitStatus execute(Settings settings, Environment env) throws Exception; - - public static class Help extends Command { - - private final CliToolConfig.Cmd cmd; - - private Help(CliToolConfig.Cmd cmd, Terminal terminal) { - super(terminal); - this.cmd = cmd; - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - cmd.printUsage(terminal); - return ExitStatus.OK_AND_EXIT; - } - } - - public static class Exit extends Command { - private final String msg; - private final ExitStatus status; - - private Exit(String msg, ExitStatus status, Terminal terminal) { - super(terminal); - this.msg = msg; - this.status = status; - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - if (msg != null) { - if (status != ExitStatus.OK) { - terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + msg); - } else { - terminal.println(msg); - } - } - return status; - } - - public ExitStatus status() { - return status; - } - } - } - - - -} - diff --git a/core/src/main/java/org/elasticsearch/common/cli/CliToolConfig.java b/core/src/main/java/org/elasticsearch/common/cli/CliToolConfig.java deleted file mode 100644 index d0ba897b33d..00000000000 --- a/core/src/main/java/org/elasticsearch/common/cli/CliToolConfig.java +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
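The ExitStatus values removed above come from sysexits.h, and the useful part of the convention is a stable mapping from failure type to exit code. A hedged sketch of such a mapping (the choice of exceptions is assumed, not taken from the deleted tool):

import java.io.IOException;

static int exitStatusFor(Exception e) {
    // mirrors the sysexits.h-based values of the removed ExitStatus enum
    if (e instanceof IllegalArgumentException) {
        return 64; // USAGE: command line usage error
    }
    if (e instanceof IOException) {
        return 74; // IO_ERROR: input/output error
    }
    return 70; // CODE_ERROR: internal software error
}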
- */ - -package org.elasticsearch.common.cli; - -import org.apache.commons.cli.Option; -import org.apache.commons.cli.OptionGroup; -import org.apache.commons.cli.Options; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -/** - * - */ -public class CliToolConfig { - - public static Builder config(String name, Class toolType) { - return new Builder(name, toolType); - } - - private final Class toolType; - private final String name; - private final Map cmds; - - private static final HelpPrinter helpPrinter = new HelpPrinter(); - - private CliToolConfig(String name, Class toolType, Cmd[] cmds) { - this.name = name; - this.toolType = toolType; - final Map cmdsMapping = new HashMap<>(); - for (int i = 0; i < cmds.length; i++) { - cmdsMapping.put(cmds[i].name, cmds[i]); - } - this.cmds = Collections.unmodifiableMap(cmdsMapping); - } - - public boolean isSingle() { - return cmds.size() == 1; - } - - public Cmd single() { - assert isSingle() : "Requesting single command on a multi-command tool"; - return cmds.values().iterator().next(); - } - - public Class toolType() { - return toolType; - } - - public String name() { - return name; - } - - public Collection cmds() { - return cmds.values(); - } - - public Cmd cmd(String name) { - return cmds.get(name); - } - - public void printUsage(Terminal terminal) { - helpPrinter.print(this, terminal); - } - - public static class Builder { - - public static Cmd.Builder cmd(String name, Class cmdType) { - return new Cmd.Builder(name, cmdType); - } - - public static OptionBuilder option(String shortName, String longName) { - return new OptionBuilder(shortName, longName); - } - - public static Option.Builder optionBuilder(String shortName, String longName) { - return Option.builder(shortName).argName(longName).longOpt(longName); - } - - public static OptionGroupBuilder optionGroup(boolean required) { - return new OptionGroupBuilder(required); - } - - private final Class toolType; - private final String name; - private Cmd[] cmds; - - private Builder(String name, Class toolType) { - this.name = name; - this.toolType = toolType; - } - - public Builder cmds(Cmd.Builder... cmds) { - this.cmds = new Cmd[cmds.length]; - for (int i = 0; i < cmds.length; i++) { - this.cmds[i] = cmds[i].build(); - this.cmds[i].toolName = name; - } - return this; - } - - public Builder cmds(Cmd... 
cmds) { - for (int i = 0; i < cmds.length; i++) { - cmds[i].toolName = name; - } - this.cmds = cmds; - return this; - } - - public CliToolConfig build() { - return new CliToolConfig(name, toolType, cmds); - } - } - - public static class Cmd { - - private String toolName; - private final String name; - private final Class cmdType; - private final Options options; - private final boolean stopAtNonOption; - - private Cmd(String name, Class cmdType, Options options, boolean stopAtNonOption) { - this.name = name; - this.cmdType = cmdType; - this.options = options; - this.stopAtNonOption = stopAtNonOption; - OptionsSource.VERBOSITY.populate(options); - } - - public Class cmdType() { - return cmdType; - } - - public String name() { - return name; - } - - public Options options() { - return options; - } - - public boolean isStopAtNonOption() { - return stopAtNonOption; - } - - public void printUsage(Terminal terminal) { - helpPrinter.print(toolName, this, terminal); - } - - public static class Builder { - - private final String name; - private final Class cmdType; - private Options options = new Options(); - private boolean stopAtNonOption = false; - - private Builder(String name, Class cmdType) { - this.name = name; - this.cmdType = cmdType; - } - - public Builder options(OptionBuilder... optionBuilder) { - for (int i = 0; i < optionBuilder.length; i++) { - options.addOption(optionBuilder[i].build()); - } - return this; - } - - public Builder options(Option.Builder... optionBuilders) { - for (int i = 0; i < optionBuilders.length; i++) { - options.addOption(optionBuilders[i].build()); - } - return this; - } - - public Builder optionGroups(OptionGroupBuilder... optionGroupBuilders) { - for (OptionGroupBuilder builder : optionGroupBuilders) { - options.addOptionGroup(builder.build()); - } - return this; - } - - /** - * @param stopAtNonOption if true an unrecognized argument stops - * the parsing and the remaining arguments are added to the - * args list. If false an unrecognized - * argument triggers a ParseException. - */ - public Builder stopAtNonOption(boolean stopAtNonOption) { - this.stopAtNonOption = stopAtNonOption; - return this; - } - - public Cmd build() { - return new Cmd(name, cmdType, options, stopAtNonOption); - } - } - } - - public static class OptionBuilder { - - private final Option option; - - private OptionBuilder(String shortName, String longName) { - option = new Option(shortName, ""); - option.setLongOpt(longName); - option.setArgName(longName); - } - - public OptionBuilder required(boolean required) { - option.setRequired(required); - return this; - } - - public OptionBuilder hasArg(boolean optional) { - option.setOptionalArg(optional); - option.setArgs(1); - return this; - } - - public Option build() { - return option; - } - } - - public static class OptionGroupBuilder { - - private OptionGroup group; - - private OptionGroupBuilder(boolean required) { - group = new OptionGroup(); - group.setRequired(required); - } - - public OptionGroupBuilder options(OptionBuilder... 
optionBuilders) { - for (OptionBuilder builder : optionBuilders) { - group.addOption(builder.build()); - } - return this; - } - - public OptionGroup build() { - return group; - } - - } - - static abstract class OptionsSource { - - static final OptionsSource HELP = new OptionsSource() { - - @Override - void populate(Options options) { - options.addOption(new OptionBuilder("h", "help").required(false).build()); - } - }; - - static final OptionsSource VERBOSITY = new OptionsSource() { - @Override - void populate(Options options) { - OptionGroup verbosityGroup = new OptionGroup(); - verbosityGroup.setRequired(false); - verbosityGroup.addOption(new OptionBuilder("s", "silent").required(false).build()); - verbosityGroup.addOption(new OptionBuilder("v", "verbose").required(false).build()); - options.addOptionGroup(verbosityGroup); - } - }; - - private Options options; - - Options options() { - if (options == null) { - options = new Options(); - populate(options); - } - return options; - } - - abstract void populate(Options options); - - } -} diff --git a/core/src/main/java/org/elasticsearch/common/cli/HelpPrinter.java b/core/src/main/java/org/elasticsearch/common/cli/HelpPrinter.java deleted file mode 100644 index ada6cc33a19..00000000000 --- a/core/src/main/java/org/elasticsearch/common/cli/HelpPrinter.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
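The OptionsSource.VERBOSITY block removed above is plain commons-cli; written directly against that API (commons-cli 1.3 style, matching the DefaultParser import in the deleted CliTool), the mutually exclusive -s/-v pair looks like this:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

static CommandLine parseVerbosity(String[] args) throws ParseException {
    Options options = new Options();
    OptionGroup verbosity = new OptionGroup(); // members are mutually exclusive
    verbosity.setRequired(false);
    verbosity.addOption(new Option("s", "silent", false, "minimal output"));
    verbosity.addOption(new Option("v", "verbose", false, "verbose output"));
    options.addOptionGroup(verbosity);
    return new DefaultParser().parse(options, args);
}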
- */ - -package org.elasticsearch.common.cli; - -import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.util.Callback; - -import java.io.IOException; -import java.io.InputStream; - -/** - * - */ -public class HelpPrinter { - - private static final String HELP_FILE_EXT = ".help"; - - public void print(CliToolConfig config, Terminal terminal) { - print(config.toolType(), config.name(), terminal); - } - - public void print(String toolName, CliToolConfig.Cmd cmd, Terminal terminal) { - print(cmd.cmdType(), toolName + "-" + cmd.name(), terminal); - } - - private static void print(Class clazz, String name, final Terminal terminal) { - terminal.println(Terminal.Verbosity.SILENT, ""); - try (InputStream input = clazz.getResourceAsStream(name + HELP_FILE_EXT)) { - Streams.readAllLines(input, new Callback() { - @Override - public void handle(String line) { - terminal.println(Terminal.Verbosity.SILENT, line); - } - }); - } catch (IOException ioe) { - throw new RuntimeException(ioe); - } - terminal.println(Terminal.Verbosity.SILENT, ""); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java b/core/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java index 8af203f2ce8..dff4277e96f 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java +++ b/core/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java @@ -29,7 +29,7 @@ public class ShapesAvailability { static { boolean xSPATIAL4J_AVAILABLE; try { - Class.forName("com.spatial4j.core.shape.impl.PointImpl"); + Class.forName("org.locationtech.spatial4j.shape.impl.PointImpl"); xSPATIAL4J_AVAILABLE = true; } catch (Throwable t) { xSPATIAL4J_AVAILABLE = false; diff --git a/core/src/main/java/org/elasticsearch/common/geo/XShapeCollection.java b/core/src/main/java/org/elasticsearch/common/geo/XShapeCollection.java index 42650275b4b..7ee2bfbd42f 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/XShapeCollection.java +++ b/core/src/main/java/org/elasticsearch/common/geo/XShapeCollection.java @@ -19,9 +19,9 @@ package org.elasticsearch.common.geo; -import com.spatial4j.core.context.SpatialContext; -import com.spatial4j.core.shape.Shape; -import com.spatial4j.core.shape.ShapeCollection; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.ShapeCollection; import java.util.List; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java index bda0106f2b6..97ef6561c9b 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.spatial4j.core.shape.Circle; +import org.locationtech.spatial4j.shape.Circle; import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index 426cbbf7800..ab997387ea1 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import 
com.spatial4j.core.shape.Rectangle; +import org.locationtech.spatial4j.shape.Rectangle; import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java index 420f61a6799..d21f47cf053 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.spatial4j.core.shape.Shape; +import org.locationtech.spatial4j.shape.Shape; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.geo.XShapeCollection; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java index 8c2870e1e09..cbc9002c785 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.spatial4j.core.shape.Shape; +import org.locationtech.spatial4j.shape.Shape; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.GeometryFactory; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java index e69c0abe4f8..51f4fd232c5 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.spatial4j.core.shape.Shape; +import org.locationtech.spatial4j.shape.Shape; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.LineString; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java index 12b16254957..b8f2c8137ef 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java @@ -19,8 +19,8 @@ package org.elasticsearch.common.geo.builders; -import com.spatial4j.core.shape.Point; -import com.spatial4j.core.shape.Shape; +import org.locationtech.spatial4j.shape.Point; +import org.locationtech.spatial4j.shape.Shape; import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.common.geo.XShapeCollection; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java index 394892d909d..6ee679b7308 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.spatial4j.core.shape.Shape; +import org.locationtech.spatial4j.shape.Shape; import 
com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.common.geo.XShapeCollection; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java index 1cee6525e7a..30b7e370f22 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.spatial4j.core.shape.Point; +import org.locationtech.spatial4j.shape.Point; import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index ab480cfbc24..52314c98ef1 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -19,8 +19,8 @@ package org.elasticsearch.common.geo.builders; -import com.spatial4j.core.exception.InvalidShapeException; -import com.spatial4j.core.shape.Shape; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Shape; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.GeometryFactory; @@ -314,7 +314,7 @@ public class PolygonBuilder extends ShapeBuilder { double shiftOffset = any.coordinate.x > DATELINE ? DATELINE : (any.coordinate.x < -DATELINE ? -DATELINE : 0); if (debugEnabled()) { - LOGGER.debug("shift: {[]}", shiftOffset); + LOGGER.debug("shift: [{}]", shiftOffset); } // run along the border of the component, collect the @@ -392,9 +392,9 @@ public class PolygonBuilder extends ShapeBuilder { if(debugEnabled()) { for (int i = 0; i < result.length; i++) { - LOGGER.debug("Component {[]}:", i); + LOGGER.debug("Component [{}]:", i); for (int j = 0; j < result[i].length; j++) { - LOGGER.debug("\t" + Arrays.toString(result[i][j])); + LOGGER.debug("\t{}", Arrays.toString(result[i][j])); } } } @@ -444,7 +444,7 @@ public class PolygonBuilder extends ShapeBuilder { // is an arbitrary point of the hole. The polygon edge next to this point // is part of the polygon the hole belongs to. 
if (debugEnabled()) { - LOGGER.debug("Holes: " + Arrays.toString(holes)); + LOGGER.debug("Holes: {}", Arrays.toString(holes)); } for (int i = 0; i < numHoles; i++) { final Edge current = new Edge(holes[i].coordinate, holes[i].next); @@ -464,9 +464,9 @@ public class PolygonBuilder extends ShapeBuilder { final int component = -edges[index].component - numHoles - 1; if(debugEnabled()) { - LOGGER.debug("\tposition ("+index+") of edge "+current+": " + edges[index]); - LOGGER.debug("\tComponent: " + component); - LOGGER.debug("\tHole intersections ("+current.coordinate.x+"): " + Arrays.toString(edges)); + LOGGER.debug("\tposition ({}) of edge {}: {}", index, current, edges[index]); + LOGGER.debug("\tComponent: {}", component); + LOGGER.debug("\tHole intersections ({}): {}", current.coordinate.x, Arrays.toString(edges)); } components.get(component).add(points[i]); diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index 8c3ea3f3261..d0c73964575 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -19,10 +19,10 @@ package org.elasticsearch.common.geo.builders; -import com.spatial4j.core.context.jts.JtsSpatialContext; -import com.spatial4j.core.exception.InvalidShapeException; -import com.spatial4j.core.shape.Shape; -import com.spatial4j.core.shape.jts.JtsGeometry; +import org.locationtech.spatial4j.context.jts.JtsSpatialContext; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.GeometryFactory; @@ -81,9 +81,9 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri * this normally isn't allowed. */ protected final boolean multiPolygonMayOverlap = false; - /** @see com.spatial4j.core.shape.jts.JtsGeometry#validate() */ + /** @see org.locationtech.spatial4j.shape.jts.JtsGeometry#validate() */ protected final boolean autoValidateJtsGeometry = true; - /** @see com.spatial4j.core.shape.jts.JtsGeometry#index() */ + /** @see org.locationtech.spatial4j.shape.jts.JtsGeometry#index() */ protected final boolean autoIndexJtsGeometry = true;//may want to turn off once SpatialStrategy impls do it. 
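The logging hunks above (PolygonBuilder here, and the circuit breakers earlier in this diff) all move from string concatenation to parameterized messages. The placeholder form only assembles the final string when the statement is actually logged, and it is what the new logger-usage check can verify statically; side by side:

// parameterized: the message is only formatted if debug logging is enabled,
// and the checker can match the {} count against the argument count
LOGGER.debug("Holes: {}", Arrays.toString(holes));
// concatenated: the full message string is built eagerly at the call site
LOGGER.debug("Holes: " + Arrays.toString(holes));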
protected ShapeBuilder() { diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 8eda42ae9be..aca136a2a9a 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -37,7 +37,11 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; +import org.elasticsearch.ingest.IngestStats; import org.elasticsearch.search.rescore.RescoreBuilder; +import org.elasticsearch.search.suggest.SuggestionBuilder; +import org.elasticsearch.search.suggest.completion.context.QueryContext; +import org.elasticsearch.search.suggest.phrase.SmoothingModel; import org.elasticsearch.tasks.Task; import org.elasticsearch.search.aggregations.AggregatorBuilder; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder; @@ -64,6 +68,7 @@ import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.function.Function; import java.util.function.Supplier; import static org.elasticsearch.ElasticsearchException.readException; @@ -282,6 +287,14 @@ public abstract class StreamInput extends InputStream { return null; } + @Nullable + public Float readOptionalFloat() throws IOException { + if (readBoolean()) { + return readFloat(); + } + return null; + } + @Nullable public Integer readOptionalVInt() throws IOException { if (readBoolean()) { @@ -552,6 +565,14 @@ public abstract class StreamInput extends InputStream { } } + public T readOptionalWritable(Writeable.IOFunction provider) throws IOException { + if (readBoolean()) { + return provider.apply(this); + } else { + return null; + } + } + public T readThrowable() throws IOException { if (readBoolean()) { int key = readVInt(); @@ -698,6 +719,13 @@ public abstract class StreamInput extends InputStream { return readNamedWriteable(RescoreBuilder.class); } + /** + * Reads a {@link SuggestionBuilder} from the current stream + */ + public SuggestionBuilder readSuggestion() throws IOException { + return readNamedWriteable(SuggestionBuilder.class); + } + /** * Reads a {@link org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder} from the current stream */ @@ -705,6 +733,13 @@ public abstract class StreamInput extends InputStream { return readNamedWriteable(ScoreFunctionBuilder.class); } + /** + * Reads a {@link SmoothingModel} from the current stream + */ + public SmoothingModel readPhraseSuggestionSmoothingModel() throws IOException { + return readNamedWriteable(SmoothingModel.class); + } + /** * Reads a {@link Task.Status} from the current stream. 
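readOptionalFloat() above, together with the writeOptionalFloat() counterpart added to StreamOutput next in this diff, implements the usual optional-value wire pattern: a boolean presence marker followed by the payload. A usage sketch, assuming in/out are a paired StreamInput/StreamOutput and maybeBoost is a possibly-null Float:

out.writeOptionalFloat(maybeBoost);      // writes false, or true followed by the float
Float readBack = in.readOptionalFloat(); // null whenever the marker was false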
*/ diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 864da006bf0..dd357e27709 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -37,6 +37,9 @@ import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; import org.elasticsearch.search.rescore.RescoreBuilder; +import org.elasticsearch.search.suggest.SuggestionBuilder; +import org.elasticsearch.search.suggest.completion.context.QueryContext; +import org.elasticsearch.search.suggest.phrase.SmoothingModel; import org.elasticsearch.tasks.Task; import org.elasticsearch.search.aggregations.AggregatorBuilder; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder; @@ -238,6 +241,15 @@ public abstract class StreamOutput extends OutputStream { } } + public void writeOptionalFloat(@Nullable Float floatValue) throws IOException { + if (floatValue == null) { + writeBoolean(false); + } else { + writeBoolean(true); + writeFloat(floatValue); + } + } + public void writeOptionalText(@Nullable Text text) throws IOException { if (text == null) { writeInt(-1); @@ -520,6 +532,15 @@ public abstract class StreamOutput extends OutputStream { } } + public void writeOptionalWriteable(@Nullable Writeable writeable) throws IOException { + if (writeable != null) { + writeBoolean(true); + writeable.writeTo(this); + } else { + writeBoolean(false); + } + } + public void writeThrowable(Throwable throwable) throws IOException { if (throwable == null) { writeBoolean(false); @@ -682,6 +703,13 @@ public abstract class StreamOutput extends OutputStream { writeNamedWriteable(scoreFunctionBuilder); } + /** + * Writes the given {@link SmoothingModel} to the stream + */ + public void writePhraseSuggestionSmoothingModel(SmoothingModel smoothingModel) throws IOException { + writeNamedWriteable(smoothingModel); + } + /** * Writes a {@link Task.Status} to the current stream. */ @@ -713,4 +741,12 @@ public abstract class StreamOutput extends OutputStream { public void writeRescorer(RescoreBuilder rescorer) throws IOException { writeNamedWriteable(rescorer); } + + /** + * Writes a {@link SuggestionBuilder} to the current stream + */ + public void writeSuggestion(SuggestionBuilder suggestion) throws IOException { + writeNamedWriteable(suggestion); + } + } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java b/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java index 9ff3de736c5..8f0cb3c96c7 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java @@ -37,4 +37,15 @@ public interface Writeable extends StreamableReader { * Write this into the {@linkplain StreamOutput}. */ void writeTo(StreamOutput out) throws IOException; + + @FunctionalInterface + interface IOFunction { + /** + * Applies this function to the given argument.
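The new Writeable.IOFunction is simply java.util.function.Function with a checked IOException, which lets readOptionalWritable accept a constructor reference. A sketch with a hypothetical MyWriteable class that has a MyWriteable(StreamInput) constructor:

out.writeOptionalWriteable(maybeValue);                           // presence marker + payload
MyWriteable readBack = in.readOptionalWritable(MyWriteable::new); // null if absent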
+ * + * @param t the function argument + * @return the function result + */ + R apply(T t) throws IOException; + } } diff --git a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java index 870b5f61466..b792a85d34c 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java +++ b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.logging; +import org.elasticsearch.common.SuppressLoggerChecks; + /** * A logger that logs deprecation notices. */ @@ -45,6 +47,7 @@ public class DeprecationLogger { /** * Logs a deprecated message. */ + @SuppressLoggerChecks(reason = "safely delegates to logger") public void deprecated(String msg, Object... params) { logger.debug(msg, params); } diff --git a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java index 94ade9334d7..c0951c47df1 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java +++ b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java @@ -21,6 +21,7 @@ package org.elasticsearch.common.logging; import org.apache.log4j.Logger; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import java.util.Locale; @@ -30,9 +31,10 @@ import java.util.Locale; public abstract class ESLoggerFactory { public static final Setting<LogLevel> LOG_DEFAULT_LEVEL_SETTING = - new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, false, Setting.Scope.CLUSTER); + new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, Property.NodeScope); public static final Setting<LogLevel> LOG_LEVEL_SETTING = - Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, true, Setting.Scope.CLUSTER); + Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, + Property.Dynamic, Property.NodeScope); public static ESLogger getLogger(String prefix, String name) { prefix = prefix == null ?
null : prefix.intern(); diff --git a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java index 28feca13c02..da628b09d2b 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java +++ b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java @@ -110,9 +110,7 @@ public class LogConfigurator { if (resolveConfig) { resolveConfig(environment, settingsBuilder); } - settingsBuilder - .putProperties("elasticsearch.", BootstrapInfo.getSystemProperties()) - .putProperties("es.", BootstrapInfo.getSystemProperties()); + settingsBuilder.putProperties("es.", BootstrapInfo.getSystemProperties()); // add custom settings after config was added so that they are not overwritten by config settingsBuilder.put(settings); settingsBuilder.replacePropertyPlaceholders(); diff --git a/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java b/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java index 7031a62a999..e967ad9d79e 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java +++ b/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java @@ -22,7 +22,7 @@ package org.elasticsearch.common.logging; import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.spi.LoggingEvent; -import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.cli.Terminal; /** * TerminalAppender logs event to Terminal.DEFAULT. It is used for example by the PluginCli. diff --git a/core/src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java deleted file mode 100644 index d31cd3835ec..00000000000 --- a/core/src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene; - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Weight; - -import java.io.IOException; -import java.util.Objects; - -/** - * Base implementation for a query which is cacheable at the index level but - * not the segment level as usually expected. 
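Reviewer note on the LogConfigurator hunk above: only the es. system-property prefix feeds logging settings now; the legacy elasticsearch. prefix is dropped. A rough sketch of prefix-filtered property import — the helper below is illustrative, and stripping the prefix from the key is an assumption made for this sketch, not a statement about Settings.Builder#putProperties:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

// Illustrative prefix filter in the spirit of putProperties("es.", ...):
// only keys carrying the prefix are imported.
public class PrefixedProperties {

    static Map<String, String> fromSystemProperties(String prefix, Properties props) {
        Map<String, String> settings = new HashMap<>();
        for (String name : props.stringPropertyNames()) {
            if (name.startsWith(prefix)) {
                // assumption: the prefix is stripped before the key is stored
                settings.put(name.substring(prefix.length()), props.getProperty(name));
            }
        }
        return settings;
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("es.logger.level", "DEBUG");
        props.setProperty("elasticsearch.logger.level", "TRACE"); // no longer honored
        System.out.println(fromSystemProperties("es.", props));   // {logger.level=DEBUG}
    }
}
```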
- */ -public abstract class IndexCacheableQuery extends Query { - - private Object readerCacheKey; - - @Override - public Query rewrite(IndexReader reader) throws IOException { - if (reader.getCoreCacheKey() != this.readerCacheKey) { - IndexCacheableQuery rewritten = (IndexCacheableQuery) clone(); - rewritten.readerCacheKey = reader.getCoreCacheKey(); - return rewritten; - } - return super.rewrite(reader); - } - - @Override - public boolean equals(Object obj) { - return super.equals(obj) - && readerCacheKey == ((IndexCacheableQuery) obj).readerCacheKey; - } - - @Override - public int hashCode() { - return 31 * super.hashCode() + Objects.hashCode(readerCacheKey); - } - - @Override - public final Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { - if (readerCacheKey == null) { - throw new IllegalStateException("Rewrite first"); - } - if (readerCacheKey != searcher.getIndexReader().getCoreCacheKey()) { - throw new IllegalStateException("Must create weight on the same reader which has been used for rewriting"); - } - return doCreateWeight(searcher, needsScores); - } - - /** Create a {@link Weight} for this query. - * @see Query#createWeight(IndexSearcher, boolean) - */ - public abstract Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException; -} diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 43051f95b9a..8508a8a2e40 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -88,7 +88,7 @@ import java.util.Objects; public class Lucene { public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54"; public static final String LATEST_POSTINGS_FORMAT = "Lucene50"; - public static final String LATEST_CODEC = "Lucene54"; + public static final String LATEST_CODEC = "Lucene60"; static { Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class); @@ -111,7 +111,7 @@ public class Lucene { try { return Version.parse(version); } catch (ParseException e) { - logger.warn("no version match {}, default to {}", version, defaultVersion, e); + logger.warn("no version match {}, default to {}", e, version, defaultVersion); return defaultVersion; } } @@ -235,16 +235,7 @@ public class Lucene { @Override protected Object doBody(String segmentFileName) throws IOException { try (IndexInput input = directory.openInput(segmentFileName, IOContext.READ)) { - final int format = input.readInt(); - final int actualFormat; - if (format == CodecUtil.CODEC_MAGIC) { - // 4.0+ - actualFormat = CodecUtil.checkHeaderNoMagic(input, "segments", SegmentInfos.VERSION_40, Integer.MAX_VALUE); - if (actualFormat >= SegmentInfos.VERSION_48) { - CodecUtil.checksumEntireFile(input); - } - } - // legacy.... 
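Reviewer note on the parseVersionLenient change above: the warn() overload used here takes the Throwable ahead of the format arguments, so the old call left e dangling after the placeholder parameters instead of binding it as the exception. A self-contained illustration of why the order matters, with a toy logger (not ESLogger itself):

```java
// Toy logger with an ESLogger-style signature: the Throwable comes second,
// ahead of the varargs that fill the {} placeholders in order.
public class LoggerOrderDemo {

    static void warn(String format, Throwable t, Object... params) {
        StringBuilder sb = new StringBuilder();
        int from = 0;
        for (Object p : params) {
            int idx = format.indexOf("{}", from);
            if (idx < 0) {
                break;                      // more params than placeholders
            }
            sb.append(format, from, idx).append(p);
            from = idx + 2;
        }
        sb.append(format.substring(from));
        System.err.println(sb + (t == null ? "" : " [" + t + "]"));
    }

    public static void main(String[] args) {
        Exception e = new IllegalArgumentException("unparseable");
        // Matches the patched call: throwable first, then both {} slots bind
        // to version strings (the values here are illustrative).
        warn("no version match {}, default to {}", e, "9.9.9", "5.0.0");
    }
}
```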
+ CodecUtil.checksumEntireFile(input); } return null; } @@ -382,7 +373,7 @@ public class Lucene { writeMissingValue(out, comparatorSource.missingValue(sortField.getReverse())); } else { writeSortType(out, sortField.getType()); - writeMissingValue(out, sortField.missingValue); + writeMissingValue(out, sortField.getMissingValue()); } out.writeBoolean(sortField.getReverse()); } @@ -684,7 +675,7 @@ public class Lucene { segmentsFileName = infos.getSegmentsFileName(); this.dir = dir; userData = infos.getUserData(); - files = Collections.unmodifiableCollection(infos.files(dir, true)); + files = Collections.unmodifiableCollection(infos.files(true)); generation = infos.getGeneration(); segmentCount = infos.size(); } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java index e0e03b18e12..9851ac12a1a 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java @@ -226,7 +226,7 @@ public final class AllTermQuery extends Query { @Override public String toString(String field) { - return new TermQuery(term).toString(field) + ToStringUtils.boost(getBoost()); + return new TermQuery(term).toString(field); } } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java b/core/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java index 83de725a83a..971cbdafffe 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java @@ -30,7 +30,6 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; -import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; @@ -118,9 +117,7 @@ public class FilterableTermsEnum extends TermsEnum { }; } - BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc()); - builder.or(docs); - bits = builder.build().bits(); + bits = BitSet.of(docs, context.reader().maxDoc()); // Count how many docs are in our filtered set // TODO make this lazy-loaded only for those that need it? diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java index d1efdc3ede2..52de9a7e5db 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java @@ -30,7 +30,6 @@ import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.StringHelper; -import org.apache.lucene.util.ToStringUtils; import java.io.IOException; import java.util.ArrayList; @@ -51,7 +50,7 @@ public class MultiPhrasePrefixQuery extends Query { /** * Sets the phrase slop for this query. * - * @see org.apache.lucene.search.PhraseQuery#setSlop(int) + * @see org.apache.lucene.search.PhraseQuery.Builder#setSlop(int) */ public void setSlop(int s) { slop = s; @@ -64,7 +63,7 @@ public class MultiPhrasePrefixQuery extends Query { /** * Sets the phrase slop for this query. 
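Reviewer note: the MultiPhrasePrefixQuery hunks just below track Lucene's move to an immutable MultiPhraseQuery assembled through MultiPhraseQuery.Builder, returning build() directly instead of rewriting the mutable query against the reader. A short usage sketch of the builder API (field and term values are illustrative):

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiPhraseQuery;

// Usage sketch of Lucene's MultiPhraseQuery.Builder, which replaces the old
// mutable MultiPhraseQuery + rewrite(reader) combination.
public class MultiPhraseBuilderDemo {
    public static void main(String[] args) {
        MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder();
        builder.setSlop(1);                         // same slop semantics as before
        builder.add(new Term("body", "quick"));     // one term at this position
        builder.add(new Term[] {                    // several alternatives at the next position
            new Term("body", "fox"), new Term("body", "foxes")
        });
        MultiPhraseQuery query = builder.build();   // immutable result
        System.out.println(query);
    }
}
```

The builder is now the only mutable stage; once built, the query never changes, which is what lets Lucene cache it safely.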
* - * @see org.apache.lucene.search.PhraseQuery#getSlop() + * @see org.apache.lucene.search.PhraseQuery.Builder#getSlop() */ public int getSlop() { return slop; @@ -73,7 +72,7 @@ public class MultiPhrasePrefixQuery extends Query { /** * Add a single term at the next position in the phrase. * - * @see org.apache.lucene.search.PhraseQuery#add(Term) + * @see org.apache.lucene.search.PhraseQuery.Builder#add(Term) */ public void add(Term term) { add(new Term[]{term}); @@ -83,7 +82,7 @@ public class MultiPhrasePrefixQuery extends Query { * Add multiple terms at the next position in the phrase. Any of the terms * may match. * - * @see org.apache.lucene.search.PhraseQuery#add(Term) + * @see org.apache.lucene.search.PhraseQuery.Builder#add(Term) */ public void add(Term[] terms) { int position = 0; @@ -98,7 +97,7 @@ public class MultiPhrasePrefixQuery extends Query { * * @param terms the terms * @param position the position of the terms provided as argument - * @see org.apache.lucene.search.PhraseQuery#add(Term, int) + * @see org.apache.lucene.search.PhraseQuery.Builder#add(Term, int) */ public void add(Term[] terms, int position) { if (termArrays.size() == 0) @@ -135,7 +134,7 @@ public class MultiPhrasePrefixQuery extends Query { if (termArrays.isEmpty()) { return new MatchNoDocsQuery(); } - MultiPhraseQuery query = new MultiPhraseQuery(); + MultiPhraseQuery.Builder query = new MultiPhraseQuery.Builder(); query.setSlop(slop); int sizeMinus1 = termArrays.size() - 1; for (int i = 0; i < sizeMinus1; i++) { @@ -154,7 +153,7 @@ public class MultiPhrasePrefixQuery extends Query { return Queries.newMatchNoDocsQuery(); } query.add(terms.toArray(Term.class), position); - return query.rewrite(reader); + return query.build(); } private void getPrefixTerms(ObjectHashSet terms, final Term prefix, final IndexReader reader) throws IOException { @@ -231,8 +230,6 @@ public class MultiPhrasePrefixQuery extends Query { buffer.append(slop); } - buffer.append(ToStringUtils.boost(getBoost())); - return buffer.toString(); } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index 73c3fc9400d..53ee2295ae4 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -23,7 +23,6 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.PrefixQuery; @@ -132,11 +131,7 @@ public class Queries { builder.add(clause); } builder.setMinimumNumberShouldMatch(msm); - BooleanQuery bq = builder.build(); - if (query.getBoost() != 1f) { - return new BoostQuery(bq, query.getBoost()); - } - return bq; + return builder.build(); } else { return query; } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java index a7b7300c9b6..54e8c0e3488 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java @@ -29,7 +29,6 @@ import 
org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; -import org.apache.lucene.util.ToStringUtils; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -102,7 +101,7 @@ public class FiltersFunctionScoreQuery extends Query { } } - Query subQuery; + final Query subQuery; final FilterFunction[] filterFunctions; final ScoreMode scoreMode; final float maxBoost; @@ -136,9 +135,7 @@ public class FiltersFunctionScoreQuery extends Query { Query newQ = subQuery.rewrite(reader); if (newQ == subQuery) return this; - FiltersFunctionScoreQuery bq = (FiltersFunctionScoreQuery) this.clone(); - bq.subQuery = newQ; - return bq; + return new FiltersFunctionScoreQuery(newQ, scoreMode, filterFunctions, maxBoost, minScore, combineFunction); } @Override @@ -355,7 +352,6 @@ public class FiltersFunctionScoreQuery extends Query { sb.append("{filter(").append(filterFunction.filter).append("), function [").append(filterFunction.function).append("]}"); } sb.append("])"); - sb.append(ToStringUtils.boost(getBoost())); return sb.toString(); } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java index 3cf4f3e48f7..646076a3a17 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java @@ -28,7 +28,6 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; -import org.apache.lucene.util.ToStringUtils; import java.io.IOException; import java.util.Objects; @@ -41,7 +40,7 @@ public class FunctionScoreQuery extends Query { public static final float DEFAULT_MAX_BOOST = Float.MAX_VALUE; - Query subQuery; + final Query subQuery; final ScoreFunction function; final float maxBoost; final CombineFunction combineFunction; @@ -84,9 +83,7 @@ public class FunctionScoreQuery extends Query { if (newQ == subQuery) { return this; } - FunctionScoreQuery bq = (FunctionScoreQuery) this.clone(); - bq.subQuery = newQ; - return bq; + return new FunctionScoreQuery(newQ, function, minScore, combineFunction, maxBoost); } @Override @@ -205,7 +202,6 @@ public class FunctionScoreQuery extends Query { public String toString(String field) { StringBuilder sb = new StringBuilder(); sb.append("function score (").append(subQuery.toString(field)).append(",function=").append(function).append(')'); - sb.append(ToStringUtils.boost(getBoost())); return sb.toString(); } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/store/FilterIndexOutput.java b/core/src/main/java/org/elasticsearch/common/lucene/store/FilterIndexOutput.java index 616e43ac422..5e5fc826264 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/store/FilterIndexOutput.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/store/FilterIndexOutput.java @@ -30,7 +30,7 @@ public class FilterIndexOutput extends IndexOutput { protected final IndexOutput out; public FilterIndexOutput(String resourceDescription, IndexOutput out) { - super(resourceDescription); + super(resourceDescription, out.getName()); this.out = out; } diff --git 
a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java index ea33274fad1..282f348c81b 100644 --- a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java +++ b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java @@ -31,14 +31,14 @@ import java.net.SocketException; import java.util.List; import java.util.Locale; -/** +/** * Simple class to log {@code ifconfig}-style output at DEBUG logging. */ final class IfConfig { - private static final ESLogger logger = Loggers.getLogger(IfConfig.class); + private static final ESLogger logger = Loggers.getLogger(IfConfig.class); private static final String INDENT = " "; - + /** log interface configuration at debug level, if its enabled */ static void logIfNecessary() { if (logger.isDebugEnabled()) { @@ -49,7 +49,7 @@ final class IfConfig { } } } - + /** perform actual logging: might throw exception if things go wrong */ private static void doLogging() throws IOException { StringBuilder msg = new StringBuilder(); @@ -59,14 +59,14 @@ final class IfConfig { // ordinary name msg.append(nic.getName()); msg.append(System.lineSeparator()); - + // display name (e.g. on windows) if (!nic.getName().equals(nic.getDisplayName())) { msg.append(INDENT); msg.append(nic.getDisplayName()); msg.append(System.lineSeparator()); } - + // addresses: v4 first, then v6 List addresses = nic.getInterfaceAddresses(); for (InterfaceAddress address : addresses) { @@ -76,7 +76,7 @@ final class IfConfig { msg.append(System.lineSeparator()); } } - + for (InterfaceAddress address : addresses) { if (address.getAddress() instanceof Inet6Address) { msg.append(INDENT); @@ -84,7 +84,7 @@ final class IfConfig { msg.append(System.lineSeparator()); } } - + // hardware address byte hardware[] = nic.getHardwareAddress(); if (hardware != null) { @@ -98,19 +98,19 @@ final class IfConfig { } msg.append(System.lineSeparator()); } - + // attributes msg.append(INDENT); msg.append(formatFlags(nic)); msg.append(System.lineSeparator()); } - logger.debug("configuration:" + System.lineSeparator() + "{}", msg.toString()); + logger.debug("configuration:{}{}", System.lineSeparator(), msg); } - + /** format internet address: java's default doesn't include everything useful */ private static String formatAddress(InterfaceAddress interfaceAddress) throws IOException { StringBuilder sb = new StringBuilder(); - + InetAddress address = interfaceAddress.getAddress(); if (address instanceof Inet6Address) { sb.append("inet6 "); @@ -122,10 +122,10 @@ final class IfConfig { sb.append(NetworkAddress.formatAddress(address)); int netmask = 0xFFFFFFFF << (32 - interfaceAddress.getNetworkPrefixLength()); sb.append(" netmask:" + NetworkAddress.formatAddress(InetAddress.getByAddress(new byte[] { - (byte)(netmask >>> 24), - (byte)(netmask >>> 16 & 0xFF), - (byte)(netmask >>> 8 & 0xFF), - (byte)(netmask & 0xFF) + (byte)(netmask >>> 24), + (byte)(netmask >>> 16 & 0xFF), + (byte)(netmask >>> 8 & 0xFF), + (byte)(netmask & 0xFF) }))); InetAddress broadcast = interfaceAddress.getBroadcast(); if (broadcast != null) { @@ -141,7 +141,7 @@ final class IfConfig { } return sb.toString(); } - + /** format network interface flags */ private static String formatFlags(NetworkInterface nic) throws SocketException { StringBuilder flags = new StringBuilder(); diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java index b0598469d3a..7e4c1348f8e 
100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -22,14 +22,15 @@ package org.elasticsearch.common.network; import java.util.Arrays; import java.util.List; +import org.elasticsearch.action.support.replication.ReplicationTask; import org.elasticsearch.client.transport.TransportClientNodesService; import org.elasticsearch.client.transport.support.TransportProxyClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.Setting.Scope; import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.http.HttpServer; import org.elasticsearch.http.HttpServerTransport; @@ -139,6 +140,7 @@ import org.elasticsearch.rest.action.template.RestPutSearchTemplateAction; import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction; import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction; import org.elasticsearch.rest.action.update.RestUpdateAction; +import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.local.LocalTransport; @@ -155,10 +157,11 @@ public class NetworkModule extends AbstractModule { public static final String LOCAL_TRANSPORT = "local"; public static final String NETTY_TRANSPORT = "netty"; - public static final Setting HTTP_TYPE_SETTING = Setting.simpleString("http.type", false, Scope.CLUSTER); - public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, false, Scope.CLUSTER); - public static final Setting TRANSPORT_SERVICE_TYPE_SETTING = Setting.simpleString("transport.service.type", false, Scope.CLUSTER); - public static final Setting TRANSPORT_TYPE_SETTING = Setting.simpleString("transport.type", false, Scope.CLUSTER); + public static final Setting HTTP_TYPE_SETTING = Setting.simpleString("http.type", Property.NodeScope); + public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope); + public static final Setting TRANSPORT_SERVICE_TYPE_SETTING = + Setting.simpleString("transport.service.type", Property.NodeScope); + public static final Setting TRANSPORT_TYPE_SETTING = Setting.simpleString("transport.type", Property.NodeScope); @@ -325,6 +328,7 @@ public class NetworkModule extends AbstractModule { registerTransportService(NETTY_TRANSPORT, TransportService.class); registerTransport(LOCAL_TRANSPORT, LocalTransport.class); registerTransport(NETTY_TRANSPORT, NettyTransport.class); + registerTaskStatus(ReplicationTask.Status.PROTOTYPE); if (transportClient == false) { registerHttpTransport(NETTY_TRANSPORT, NettyHttpServerTransport.class); @@ -370,6 +374,10 @@ public class NetworkModule extends AbstractModule { } } + public void registerTaskStatus(Task.Status prototype) { + namedWriteableRegistry.registerPrototype(Task.Status.class, prototype); + } + @Override protected void configure() { bind(NetworkService.class).toInstance(networkService); diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java index 5e8dbc4dcad..ff1f3912cc5 100644 
--- a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java @@ -22,6 +22,7 @@ package org.elasticsearch.common.network; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -34,6 +35,7 @@ import java.util.HashSet; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; +import java.util.function.Function; /** * @@ -43,24 +45,33 @@ public class NetworkService extends AbstractComponent { /** By default, we bind to loopback interfaces */ public static final String DEFAULT_NETWORK_HOST = "_local_"; - public static final Setting> GLOBAL_NETWORK_HOST_SETTING = Setting.listSetting("network.host", Arrays.asList(DEFAULT_NETWORK_HOST), - s -> s, false, Setting.Scope.CLUSTER); - public static final Setting> GLOBAL_NETWORK_BINDHOST_SETTING = Setting.listSetting("network.bind_host", GLOBAL_NETWORK_HOST_SETTING, - s -> s, false, Setting.Scope.CLUSTER); - public static final Setting> GLOBAL_NETWORK_PUBLISHHOST_SETTING = Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING, - s -> s, false, Setting.Scope.CLUSTER); - public static final Setting NETWORK_SERVER = Setting.boolSetting("network.server", true, false, Setting.Scope.CLUSTER); + public static final Setting> GLOBAL_NETWORK_HOST_SETTING = + Setting.listSetting("network.host", Arrays.asList(DEFAULT_NETWORK_HOST), Function.identity(), Property.NodeScope); + public static final Setting> GLOBAL_NETWORK_BINDHOST_SETTING = + Setting.listSetting("network.bind_host", GLOBAL_NETWORK_HOST_SETTING, Function.identity(), Property.NodeScope); + public static final Setting> GLOBAL_NETWORK_PUBLISHHOST_SETTING = + Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING, Function.identity(), Property.NodeScope); + public static final Setting NETWORK_SERVER = Setting.boolSetting("network.server", true, Property.NodeScope); public static final class TcpSettings { - public static final Setting TCP_NO_DELAY = Setting.boolSetting("network.tcp.no_delay", true, false, Setting.Scope.CLUSTER); - public static final Setting TCP_KEEP_ALIVE = Setting.boolSetting("network.tcp.keep_alive", true, false, Setting.Scope.CLUSTER); - public static final Setting TCP_REUSE_ADDRESS = Setting.boolSetting("network.tcp.reuse_address", NetworkUtils.defaultReuseAddress(), false, Setting.Scope.CLUSTER); - public static final Setting TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); - public static final Setting TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING = Setting.boolSetting("network.tcp.blocking", false, false, Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING_SERVER = Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, false, Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING_CLIENT = Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, false, Setting.Scope.CLUSTER); - public static final Setting TCP_CONNECT_TIMEOUT = 
Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER); + public static final Setting TCP_NO_DELAY = + Setting.boolSetting("network.tcp.no_delay", true, Property.NodeScope); + public static final Setting TCP_KEEP_ALIVE = + Setting.boolSetting("network.tcp.keep_alive", true, Property.NodeScope); + public static final Setting TCP_REUSE_ADDRESS = + Setting.boolSetting("network.tcp.reuse_address", NetworkUtils.defaultReuseAddress(), Property.NodeScope); + public static final Setting TCP_SEND_BUFFER_SIZE = + Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), Property.NodeScope); + public static final Setting TCP_RECEIVE_BUFFER_SIZE = + Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), Property.NodeScope); + public static final Setting TCP_BLOCKING = + Setting.boolSetting("network.tcp.blocking", false, Property.NodeScope); + public static final Setting TCP_BLOCKING_SERVER = + Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, Property.NodeScope); + public static final Setting TCP_BLOCKING_CLIENT = + Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, Property.NodeScope); + public static final Setting TCP_CONNECT_TIMEOUT = + Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), Property.NodeScope); } /** diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 453fc3f9a36..adffb8e9e01 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -44,23 +44,22 @@ public abstract class AbstractScopedSettings extends AbstractComponent { private final List> settingUpdaters = new CopyOnWriteArrayList<>(); private final Map> complexMatchers; private final Map> keySettings; - private final Setting.Scope scope; + private final Setting.Property scope; private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$"); private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$"); - protected AbstractScopedSettings(Settings settings, Set> settingsSet, Setting.Scope scope) { + protected AbstractScopedSettings(Settings settings, Set> settingsSet, Setting.Property scope) { super(settings); this.lastSettingsApplied = Settings.EMPTY; this.scope = scope; Map> complexMatchers = new HashMap<>(); Map> keySettings = new HashMap<>(); for (Setting setting : settingsSet) { - if (setting.getScope() != scope) { - throw new IllegalArgumentException("Setting must be a " + scope + " setting but was: " + setting.getScope()); - } - if (isValidKey(setting.getKey()) == false && (setting.isGroupSetting() && isValidGroupKey(setting.getKey())) == false) { - throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "]"); + if (setting.getProperties().contains(scope) == false) { + throw new IllegalArgumentException("Setting must be a " + scope + " setting but has: " + setting.getProperties()); } + validateSettingKey(setting); + if (setting.hasComplexMatcher()) { Setting overlappingSetting = findOverlappingSetting(setting, complexMatchers); if (overlappingSetting != null) { @@ -76,6 +75,12 @@ public abstract class AbstractScopedSettings extends AbstractComponent { this.keySettings = Collections.unmodifiableMap(keySettings); } + protected void 
validateSettingKey(Setting setting) { + if (isValidKey(setting.getKey()) == false && (setting.isGroupSetting() && isValidGroupKey(setting.getKey())) == false) { + throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "]"); + } + } + protected AbstractScopedSettings(Settings nodeSettings, Settings scopeSettings, AbstractScopedSettings other) { super(nodeSettings); this.lastSettingsApplied = scopeSettings; @@ -96,7 +101,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { return GROUP_KEY_PATTERN.matcher(key).matches(); } - public Setting.Scope getScope() { + public Setting.Property getScope() { return this.scope; } @@ -342,8 +347,9 @@ public abstract class AbstractScopedSettings extends AbstractComponent { * Returns the value for the given setting. */ public T get(Setting setting) { - if (setting.getScope() != scope) { - throw new IllegalArgumentException("settings scope doesn't match the setting scope [" + this.scope + "] != [" + setting.getScope() + "]"); + if (setting.getProperties().contains(scope) == false) { + throw new IllegalArgumentException("settings scope doesn't match the setting scope [" + this.scope + "] not in [" + + setting.getProperties() + "]"); } if (get(setting.getKey()) == null) { throw new IllegalArgumentException("setting " + setting.getKey() + " has not been registered"); diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 9d7fc660cfc..38107db3a98 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -29,8 +29,10 @@ import org.elasticsearch.client.transport.TransportClientNodesService; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.InternalClusterInfoService; +import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; @@ -41,10 +43,11 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDeci import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.discovery.DiscoveryModule; @@ -101,7 +104,7 @@ import java.util.function.Predicate; */ public final class ClusterSettings extends AbstractScopedSettings { public ClusterSettings(Settings 
nodeSettings, Set> settingsSet) { - super(nodeSettings, settingsSet, Setting.Scope.CLUSTER); + super(nodeSettings, settingsSet, Property.NodeScope); addSettingsUpdater(new LoggingSettingUpdater(nodeSettings)); } @@ -252,14 +255,14 @@ public final class ClusterSettings extends AbstractScopedSettings { HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, - InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + ClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, TransportService.TRACE_LOG_EXCLUDE_SETTING, TransportService.TRACE_LOG_INCLUDE_SETTING, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, - InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING, + NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING, HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, Transport.TRANSPORT_TCP_COMPRESS, @@ -326,7 +329,7 @@ public final class ClusterSettings extends AbstractScopedSettings { Environment.PATH_SCRIPTS_SETTING, Environment.PATH_SHARED_DATA_SETTING, Environment.PIDFILE_SETTING, - InternalClusterService.NODE_ID_SEED_SETTING, + DiscoveryNodeService.NODE_ID_SEED_SETTING, DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING, DiscoveryModule.DISCOVERY_TYPE_SETTING, DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING, diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 69ef795812d..322ac4de799 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -22,6 +22,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; @@ -34,7 +35,8 @@ import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.percolator.PercolatorQueriesRegistry; +import org.elasticsearch.index.percolator.PercolatorQueryCache; +import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.Store; @@ -44,12 +46,13 @@ import org.elasticsearch.indices.IndicesRequestCache; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.Map; import java.util.Set; import java.util.function.Predicate; /** * Encapsulates all valid index level settings. 
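Reviewer note on the AbstractScopedSettings hunks above: with the single Scope enum replaced by a set of Property flags, scope checks become membership tests rather than equality comparisons. A reduced, standalone sketch of that validation (the class is illustrative; the enum values mirror the patch):

```java
import java.util.EnumSet;

// Standalone sketch of the membership-based scope check performed when a
// setting is registered with a scoped registry.
public class ScopeCheckDemo {

    enum Property { Filtered, Dynamic, Deprecated, NodeScope, IndexScope }

    static void validateScope(EnumSet<Property> properties, Property scope) {
        if (properties.contains(scope) == false) {
            throw new IllegalArgumentException("Setting must be a " + scope + " setting but has: " + properties);
        }
    }

    public static void main(String[] args) {
        validateScope(EnumSet.of(Property.Dynamic, Property.NodeScope), Property.NodeScope); // passes
        validateScope(EnumSet.of(Property.IndexScope), Property.NodeScope);                  // throws
    }
}
```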
- * @see org.elasticsearch.common.settings.Setting.Scope#INDEX + * @see Property#IndexScope */ public final class IndexScopedSettings extends AbstractScopedSettings { @@ -122,7 +125,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { FieldMapper.IGNORE_MALFORMED_SETTING, FieldMapper.COERCE_SETTING, Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING, - PercolatorQueriesRegistry.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING, + PercolatorQueryCache.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING, MapperService.INDEX_MAPPER_DYNAMIC_SETTING, MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING, BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING, @@ -133,16 +136,23 @@ public final class IndexScopedSettings extends AbstractScopedSettings { FsDirectoryService.INDEX_LOCK_FACTOR_SETTING, EngineConfig.INDEX_CODEC_SETTING, IndexWarmer.INDEX_NORMS_LOADING_SETTING, - // this sucks but we can't really validate all the analyzers/similarity in here - Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX), // this allows similarity settings to be passed - Setting.groupSetting("index.analysis.", false, Setting.Scope.INDEX) // this allows analysis settings to be passed + // validate that built-in similarities don't get redefined + Setting.groupSetting("index.similarity.", (s) -> { + Map groups = s.getAsGroups(); + for (String key : SimilarityService.BUILT_IN.keySet()) { + if (groups.containsKey(key)) { + throw new IllegalArgumentException("illegal value for [index.similarity."+ key + "] cannot redefine built-in similarity"); + } + } + }, Property.IndexScope), // this allows similarity settings to be passed + Setting.groupSetting("index.analysis.", Property.IndexScope) // this allows analysis settings to be passed ))); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); public IndexScopedSettings(Settings settings, Set> settingsSet) { - super(settings, settingsSet, Setting.Scope.INDEX); + super(settings, settingsSet, Property.IndexScope); } private IndexScopedSettings(Settings settings, IndexScopedSettings other, IndexMetaData metaData) { @@ -153,6 +163,14 @@ public final class IndexScopedSettings extends AbstractScopedSettings { return new IndexScopedSettings(settings, this, metaData); } + @Override + protected void validateSettingKey(Setting setting) { + if (setting.getKey().startsWith("index.") == false) { + throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "] must start with [index.]"); + } + super.validateSettingKey(setting); + } + public boolean isPrivateSetting(String key) { switch (key) { case IndexMetaData.SETTING_CREATION_DATE: diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 0b4e43744a5..f0e1b2e64ea 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -20,37 +20,47 @@ package org.elasticsearch.common.settings; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.DeprecationLogger; import 
org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.MemorySizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.Enumeration; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Predicate; import java.util.regex.Pattern; import java.util.stream.Collectors; /** * A setting. Encapsulates typical stuff like default value, parsing, and scope. - * Some (dynamic=true) can by modified at run time using the API. + * Some (Property.Dynamic) can be modified at run time using the API. * All settings inside elasticsearch or in any of the plugins should use this type-safe and generic settings infrastructure * together with {@link AbstractScopedSettings}. This class contains several utility methods that make it straightforward * to add settings for the majority of the cases. For instance a simple boolean setting can be defined like this: * <pre>{@code
- * public static final Setting<Boolean> MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, false, Scope.CLUSTER);}
+ * public static final Setting<Boolean> MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, Property.NodeScope);}
 * </pre>
* To retrieve the value of the setting a {@link Settings} object can be passed directly to the {@link Setting#get(Settings)} method. *
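Reviewer note: a usage sketch of the property-based API this javadoc describes, assuming the classes from this patch are on the classpath (MySettings is an illustrative holder class; Settings.EMPTY just exercises the default value):

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

public class MySettings {
    // Property.Dynamic + Property.NodeScope replaces the old
    // (dynamic=true, Scope.CLUSTER) argument pair.
    public static final Setting<Boolean> MY_BOOLEAN =
        Setting.boolSetting("my.bool.setting", true, Property.Dynamic, Property.NodeScope);

    public static void main(String[] args) {
        boolean value = MY_BOOLEAN.get(Settings.EMPTY); // nothing set -> default true
        System.out.println(value + ", dynamic=" + MY_BOOLEAN.isDynamic());
    }
}
```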
@@ -61,32 +71,81 @@ import java.util.stream.Collectors;
  * public enum Color {
  *     RED, GREEN, BLUE;
  * }
- * public static final Setting<Color> MY_BOOLEAN = new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, false, Scope.CLUSTER);
+ * public static final Setting<Color> MY_COLOR =
+ *     new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, Property.NodeScope);
  * }
 * </pre>
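Reviewer note: the Color example reduces to "store the enum as its string form, parse on access with a String -> T function". A dependency-free analogue of that contract (all names here are illustrative):

```java
import java.util.Collections;
import java.util.Map;
import java.util.function.Function;

// Dependency-free analogue of the enum-valued setting above: the value is
// kept as a string and parsed on access with a String -> T function.
public class EnumSettingDemo {

    public enum Color { RED, GREEN, BLUE }

    static <T> T get(Map<String, String> settings, String key, String defaultValue, Function<String, T> parser) {
        String raw = settings.containsKey(key) ? settings.get(key) : defaultValue;
        return parser.apply(raw);
    }

    public static void main(String[] args) {
        Map<String, String> settings = Collections.singletonMap("my.color.setting", "BLUE");
        Color color = get(settings, "my.color.setting", Color.RED.toString(), Color::valueOf);
        System.out.println(color); // BLUE
    }
}
```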
*/ public class Setting<T> extends ToXContentToBytes { + + public enum Property { + /** + * should be filtered in some APIs (mask password/credentials) + */ + Filtered, + + /** + * iff this setting is dynamically updateable + */ + Dynamic, + + /** + * mark this setting as deprecated + */ + Deprecated, + + /** + * Node scope + */ + NodeScope, + + /** + * Index scope + */ + IndexScope + } + + private static final ESLogger logger = Loggers.getLogger(Setting.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + private final Key key; protected final Function<Settings, String> defaultValue; private final Function<String, T> parser; - private final boolean dynamic; - private final Scope scope; + private final EnumSet<Property> properties; + + private static final EnumSet<Property> EMPTY_PROPERTIES = EnumSet.noneOf(Property.class); /** - * Creates a new Setting instance + * Creates a new Setting instance. When no scope is provided, we default to {@link Property#NodeScope}. * @param key the settings key for this setting. * @param defaultValue a default value function that returns the default values string representation. * @param parser a parser that parses the string rep into a complex datatype. - * @param dynamic true iff this setting can be dynamically updateable - * @param scope the scope of this setting + * @param properties properties for this setting like scope, filtering... */ - public Setting(Key key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) { + public Setting(Key key, Function<Settings, String> defaultValue, Function<String, T> parser, Property... properties) { assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null"; this.key = key; this.defaultValue = defaultValue; this.parser = parser; - this.dynamic = dynamic; - this.scope = scope; + if (properties == null) { + throw new IllegalArgumentException("properties can not be null for setting [" + key + "]"); + } + if (properties.length == 0) { + this.properties = EMPTY_PROPERTIES; + } else { + this.properties = EnumSet.copyOf(Arrays.asList(properties)); + } + } + + /** + * Creates a new Setting instance + * @param key the settings key for this setting. + * @param defaultValue a default value. + * @param parser a parser that parses the string rep into a complex datatype. + * @param properties properties for this setting like scope, filtering... + */ + public Setting(String key, String defaultValue, Function<String, T> parser, Property... properties) { + this(key, s -> defaultValue, parser, properties); } /** @@ -94,11 +153,10 @@ * @param key the settings key for this setting. * @param defaultValue a default value function that returns the default values string representation. * @param parser a parser that parses the string rep into a complex datatype. - * @param dynamic true iff this setting can be dynamically updateable - * @param scope the scope of this setting + * @param properties properties for this setting like scope, filtering... */ - public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) { - this(new SimpleKey(key), defaultValue, parser, dynamic, scope); + public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, Property... properties) { + this(new SimpleKey(key), defaultValue, parser, properties); } /** @@ -106,11 +164,10 @@ * @param key the settings key for this setting.
* @param fallBackSetting a setting to fall back to if the current setting is not set. * @param parser a parser that parses the string rep into a complex datatype. - * @param dynamic true iff this setting can be dynamically updateable - * @param scope the scope of this setting + * @param properties properties for this setting like scope, filtering... */ - public Setting(String key, Setting fallBackSetting, Function parser, boolean dynamic, Scope scope) { - this(key, fallBackSetting::getRaw, parser, dynamic, scope); + public Setting(String key, Setting fallBackSetting, Function parser, Property... properties) { + this(key, fallBackSetting::getRaw, parser, properties); } /** @@ -132,17 +189,46 @@ public class Setting extends ToXContentToBytes { } /** - * Returns true iff this setting is dynamically updateable, otherwise false + * Returns true if this setting is dynamically updateable, otherwise false */ public final boolean isDynamic() { - return dynamic; + return properties.contains(Property.Dynamic); } /** - * Returns the settings scope + * Returns the setting properties + * @see Property */ - public final Scope getScope() { - return scope; + public EnumSet getProperties() { + return properties; + } + + /** + * Returns true if this setting must be filtered, otherwise false + */ + public boolean isFiltered() { + return properties.contains(Property.Filtered); + } + + /** + * Returns true if this setting has a node scope, otherwise false + */ + public boolean hasNodeScope() { + return properties.contains(Property.NodeScope); + } + + /** + * Returns true if this setting has an index scope, otherwise false + */ + public boolean hasIndexScope() { + return properties.contains(Property.IndexScope); + } + + /** + * Returns true if this setting is deprecated, otherwise false + */ + public boolean isDeprecated() { + return properties.contains(Property.Deprecated); } /** @@ -177,7 +263,7 @@ public class Setting extends ToXContentToBytes { /** * Returns true iff this setting is present in the given settings object. Otherwise false */ - public final boolean exists(Settings settings) { + public boolean exists(Settings settings) { return settings.get(getKey()) != null; } @@ -205,6 +291,12 @@ public class Setting extends ToXContentToBytes { * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value. */ public String getRaw(Settings settings) { + // They're using the setting, so we need to tell them to stop + if (this.isDeprecated() && this.exists(settings)) { + // It would be convenient to show its replacement key, but replacement is often not so simple + deprecationLogger.deprecated("[{}] setting was deprecated in Elasticsearch and it will be removed in a future release! 
" + + "See the breaking changes lists in the documentation for details", getKey()); + } return settings.get(getKey(), defaultValue.apply(settings)); } @@ -221,8 +313,7 @@ public class Setting extends ToXContentToBytes { public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("key", key.toString()); - builder.field("type", scope.name()); - builder.field("dynamic", dynamic); + builder.field("properties", properties); builder.field("is_group_setting", isGroupSetting()); builder.field("default", defaultValue.apply(Settings.EMPTY)); builder.endObject(); @@ -244,14 +335,6 @@ public class Setting extends ToXContentToBytes { return this; } - /** - * The settings scope - settings can either be cluster settings or per index settings. - */ - public enum Scope { - CLUSTER, - INDEX; - } - /** * Build a new updater with a noop validator. */ @@ -349,38 +432,34 @@ public class Setting extends ToXContentToBytes { } - public Setting(String key, String defaultValue, Function parser, boolean dynamic, Scope scope) { - this(key, (s) -> defaultValue, parser, dynamic, scope); + public static Setting floatSetting(String key, float defaultValue, Property... properties) { + return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, properties); } - public static Setting floatSetting(String key, float defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, dynamic, scope); - } - - public static Setting floatSetting(String key, float defaultValue, float minValue, boolean dynamic, Scope scope) { + public static Setting floatSetting(String key, float defaultValue, float minValue, Property... properties) { return new Setting<>(key, (s) -> Float.toString(defaultValue), (s) -> { float value = Float.parseFloat(s); if (value < minValue) { throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); } return value; - }, dynamic, scope); + }, properties); } - public static Setting intSetting(String key, int defaultValue, int minValue, int maxValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, maxValue, key), dynamic, scope); + public static Setting intSetting(String key, int defaultValue, int minValue, int maxValue, Property... properties) { + return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, maxValue, key), properties); } - public static Setting intSetting(String key, int defaultValue, int minValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), dynamic, scope); + public static Setting intSetting(String key, int defaultValue, int minValue, Property... properties) { + return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), properties); } - public static Setting longSetting(String key, long defaultValue, long minValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), dynamic, scope); + public static Setting longSetting(String key, long defaultValue, long minValue, Property... 
properties) { + return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), properties); } - public static Setting simpleString(String key, boolean dynamic, Scope scope) { - return new Setting<>(key, "", Function.identity(), dynamic, scope); + public static Setting simpleString(String key, Property... properties) { + return new Setting<>(key, s -> "", Function.identity(), properties); } public static int parseInt(String s, int minValue, String key) { @@ -414,51 +493,58 @@ public class Setting extends ToXContentToBytes { return timeValue; } - public static Setting intSetting(String key, int defaultValue, boolean dynamic, Scope scope) { - return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope); + public static Setting intSetting(String key, int defaultValue, Property... properties) { + return intSetting(key, defaultValue, Integer.MIN_VALUE, properties); } - public static Setting boolSetting(String key, boolean defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope); + public static Setting boolSetting(String key, boolean defaultValue, Property... properties) { + return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, properties); } - public static Setting boolSetting(String key, Setting fallbackSetting, boolean dynamic, Scope scope) { - return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, dynamic, scope); + public static Setting boolSetting(String key, Setting fallbackSetting, Property... properties) { + return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, properties); } - public static Setting byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope); + public static Setting byteSizeSetting(String key, String percentage, Property... properties) { + return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties); } - public static Setting byteSizeSetting(String key, ByteSizeValue value, boolean dynamic, Scope scope) { - return byteSizeSetting(key, (s) -> value.toString(), dynamic, scope); + public static Setting byteSizeSetting(String key, ByteSizeValue value, Property... properties) { + return byteSizeSetting(key, (s) -> value.toString(), properties); } - public static Setting byteSizeSetting(String key, Setting fallbackSettings, boolean dynamic, Scope scope) { - return byteSizeSetting(key, fallbackSettings::getRaw, dynamic, scope); + public static Setting byteSizeSetting(String key, Setting fallbackSettings, + Property... properties) { + return byteSizeSetting(key, fallbackSettings::getRaw, properties); } - public static Setting byteSizeSetting(String key, Function defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, defaultValue, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope); + public static Setting byteSizeSetting(String key, Function defaultValue, + Property... 
properties) { + return new Setting<>(key, defaultValue, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), properties); } - public static Setting positiveTimeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) { - return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), dynamic, scope); + public static Setting positiveTimeSetting(String key, TimeValue defaultValue, Property... properties) { + return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), properties); } - public static Setting> listSetting(String key, List defaultStringValue, Function singleValueParser, boolean dynamic, Scope scope) { - return listSetting(key, (s) -> defaultStringValue, singleValueParser, dynamic, scope); + public static Setting> listSetting(String key, List defaultStringValue, Function singleValueParser, + Property... properties) { + return listSetting(key, (s) -> defaultStringValue, singleValueParser, properties); } - public static Setting> listSetting(String key, Setting> fallbackSetting, Function singleValueParser, boolean dynamic, Scope scope) { - return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, dynamic, scope); + public static Setting> listSetting(String key, Setting> fallbackSetting, Function singleValueParser, + Property... properties) { + return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, properties); } - public static Setting> listSetting(String key, Function> defaultStringValue, Function singleValueParser, boolean dynamic, Scope scope) { + public static Setting> listSetting(String key, Function> defaultStringValue, + Function singleValueParser, Property... properties) { Function> parser = (s) -> parseableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList()); - return new Setting>(new ListKey(key), (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) { + return new Setting>(new ListKey(key), + (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, properties) { + private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?"); @Override public String getRaw(Settings settings) { String[] array = settings.getAsArray(getKey(), null); @@ -505,17 +591,45 @@ public class Setting extends ToXContentToBytes { throw new ElasticsearchException(ex); } } - - public static Setting groupSetting(String key, boolean dynamic, Scope scope) { - return new Setting(new GroupKey(key), (s) -> "", (s) -> null, dynamic, scope) { + public static Setting groupSetting(String key, Property... properties) { + return groupSetting(key, (s) -> {}, properties); + } + public static Setting groupSetting(String key, Consumer validator, Property... 
properties) { + return new Setting(new GroupKey(key), (s) -> "", (s) -> null, properties) { @Override public boolean isGroupSetting() { return true; } + @Override + public String getRaw(Settings settings) { + Settings subSettings = get(settings); + try { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + subSettings.toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + return builder.string(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + @Override public Settings get(Settings settings) { - return settings.getByPrefix(getKey()); + Settings byPrefix = settings.getByPrefix(getKey()); + validator.accept(byPrefix); + return byPrefix; + } + + @Override + public boolean exists(Settings settings) { + for (Map.Entry entry : settings.getAsMap().entrySet()) { + if (entry.getKey().startsWith(key)) { + return true; + } + } + return false; } @Override @@ -560,30 +674,37 @@ public class Setting extends ToXContentToBytes { }; } - public static Setting timeSetting(String key, Function defaultValue, TimeValue minValue, boolean dynamic, Scope scope) { - return new Setting<>(key, defaultValue, (s) -> parseTimeValue(s, minValue, key), dynamic, scope); + public static Setting timeSetting(String key, Function defaultValue, TimeValue minValue, + Property... properties) { + return new Setting<>(key, defaultValue, (s) -> { + TimeValue timeValue = TimeValue.parseTimeValue(s, null, key); + if (timeValue.millis() < minValue.millis()) { + throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); + } + return timeValue; + }, properties); } - public static Setting timeSetting(String key, TimeValue defaultValue, TimeValue minValue, boolean dynamic, Scope scope) { - return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, dynamic, scope); + public static Setting timeSetting(String key, TimeValue defaultValue, TimeValue minValue, Property... properties) { + return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, properties); } - public static Setting timeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, key), dynamic, scope); + public static Setting timeSetting(String key, TimeValue defaultValue, Property... properties) { + return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, key), properties); } - public static Setting timeSetting(String key, Setting fallbackSetting, boolean dynamic, Scope scope) { - return new Setting<>(key, fallbackSetting::getRaw, (s) -> TimeValue.parseTimeValue(s, key), dynamic, scope); + public static Setting timeSetting(String key, Setting fallbackSetting, Property... properties) { + return new Setting<>(key, fallbackSetting::getRaw, (s) -> TimeValue.parseTimeValue(s, key), properties); } - public static Setting doubleSetting(String key, double defaultValue, double minValue, boolean dynamic, Scope scope) { + public static Setting doubleSetting(String key, double defaultValue, double minValue, Property... 
properties) { return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> { final double d = Double.parseDouble(s); if (d < minValue) { throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); } return d; - }, dynamic, scope); + }, properties); } @Override @@ -604,8 +725,9 @@ public class Setting extends ToXContentToBytes { * can easily be added with this setting. Yet, prefix key settings don't support updaters out of the box unless * {@link #getConcreteSetting(String)} is used to pull the updater. */ - public static Setting prefixKeySetting(String prefix, String defaultValue, Function parser, boolean dynamic, Scope scope) { - return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, dynamic, scope); + public static Setting prefixKeySetting(String prefix, String defaultValue, Function parser, + Property... properties) { + return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, properties); } /** @@ -613,16 +735,19 @@ public class Setting extends ToXContentToBytes { * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, adfix key settings don't support updaters * out of the box unless {@link #getConcreteSetting(String)} is used to pull the updater. */ - public static Setting adfixKeySetting(String prefix, String suffix, Function defaultValue, Function parser, boolean dynamic, Scope scope) { - return affixKeySetting(AffixKey.withAdfix(prefix, suffix), defaultValue, parser, dynamic, scope); + public static Setting adfixKeySetting(String prefix, String suffix, Function defaultValue, + Function parser, Property... properties) { + return affixKeySetting(AffixKey.withAdfix(prefix, suffix), defaultValue, parser, properties); } - public static Setting adfixKeySetting(String prefix, String suffix, String defaultValue, Function parser, boolean dynamic, Scope scope) { - return adfixKeySetting(prefix, suffix, (s) -> defaultValue, parser, dynamic, scope); + public static Setting adfixKeySetting(String prefix, String suffix, String defaultValue, Function parser, + Property... properties) { + return adfixKeySetting(prefix, suffix, (s) -> defaultValue, parser, properties); } - public static Setting affixKeySetting(AffixKey key, Function defaultValue, Function parser, boolean dynamic, Scope scope) { - return new Setting(key, defaultValue, parser, dynamic, scope) { + public static Setting affixKeySetting(AffixKey key, Function defaultValue, Function parser, + Property... 
properties) { + return new Setting(key, defaultValue, parser, properties) { @Override boolean isGroupSetting() { @@ -637,7 +762,7 @@ public class Setting extends ToXContentToBytes { @Override public Setting getConcreteSetting(String key) { if (match(key)) { - return new Setting<>(key, defaultValue, parser, dynamic, scope); + return new Setting<>(key, defaultValue, parser, properties); } else { throw new IllegalArgumentException("key [" + key + "] must match [" + getKey() + "] but didn't."); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index aafaff3e9d7..a6784e561d2 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -761,6 +761,14 @@ public final class Settings implements ToXContent { return builder; } + /** + * Returns true if this settings object contains no settings + * @return true if this settings object contains no settings + */ + public boolean isEmpty() { + return this.settings.isEmpty(); + } + /** * A builder allowing to put different settings and then {@link #build()} an immutable * settings implementation. Use {@link Settings#settingsBuilder()} in order to @@ -1136,10 +1144,10 @@ public final class Settings implements ToXContent { * @param properties The properties to put * @return The builder */ - public Builder putProperties(String prefix, Dictionary properties) { - for (Object key1 : Collections.list(properties.keys())) { - String key = Objects.toString(key1); - String value = Objects.toString(properties.get(key)); + public Builder putProperties(String prefix, Dictionary properties) { + for (Object property : Collections.list(properties.keys())) { + String key = Objects.toString(property); + String value = Objects.toString(properties.get(property)); if (key.startsWith(prefix)) { map.put(key.substring(prefix.length()), value); } @@ -1154,19 +1162,12 @@ public final class Settings implements ToXContent { * @param properties The properties to put * @return The builder */ - public Builder putProperties(String prefix, Dictionary properties, String[] ignorePrefixes) { - for (Object key1 : Collections.list(properties.keys())) { - String key = Objects.toString(key1); - String value = Objects.toString(properties.get(key)); + public Builder putProperties(String prefix, Dictionary properties, String ignorePrefix) { + for (Object property : Collections.list(properties.keys())) { + String key = Objects.toString(property); + String value = Objects.toString(properties.get(property)); if (key.startsWith(prefix)) { - boolean ignore = false; - for (String ignorePrefix : ignorePrefixes) { - if (key.startsWith(ignorePrefix)) { - ignore = true; - break; - } - } - if (!ignore) { + if (!key.startsWith(ignorePrefix)) { map.put(key.substring(prefix.length()), value); } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index b06f53459c8..33233ff627e 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -35,7 +35,7 @@ public class SettingsModule extends AbstractModule { private final Settings settings; private final Set settingsFilterPattern = new HashSet<>(); - private final Map> clusterSettings = new HashMap<>(); + private final Map> nodeSettings = new HashMap<>(); 
private final Map> indexSettings = new HashMap<>(); private static final Predicate TRIBE_CLIENT_NODE_SETTINGS_PREDICATE = (s) -> s.startsWith("tribe.") && TribeService.TRIBE_SETTING_KEYS.contains(s) == false; @@ -52,10 +52,9 @@ public class SettingsModule extends AbstractModule { @Override protected void configure() { final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values())); - final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.clusterSettings.values())); + final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values())); // by now we are fully configured, lets check node level settings for unregistered index settings - indexScopedSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE)); - final Predicate acceptOnlyClusterSettings = TRIBE_CLIENT_NODE_SETTINGS_PREDICATE.or(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE).negate(); + final Predicate acceptOnlyClusterSettings = TRIBE_CLIENT_NODE_SETTINGS_PREDICATE.negate(); clusterSettings.validate(settings.filter(acceptOnlyClusterSettings)); validateTribeSettings(settings, clusterSettings); bind(Settings.class).toInstance(settings); @@ -71,19 +70,26 @@ public class SettingsModule extends AbstractModule { * the setting during startup. */ public void registerSetting(Setting setting) { - switch (setting.getScope()) { - case CLUSTER: - if (clusterSettings.containsKey(setting.getKey())) { + if (setting.isFiltered()) { + if (settingsFilterPattern.contains(setting.getKey()) == false) { + registerSettingsFilter(setting.getKey()); + } + } + if (setting.hasNodeScope() || setting.hasIndexScope()) { + if (setting.hasNodeScope()) { + if (nodeSettings.containsKey(setting.getKey())) { throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); } - clusterSettings.put(setting.getKey(), setting); - break; - case INDEX: + nodeSettings.put(setting.getKey(), setting); + } + if (setting.hasIndexScope()) { if (indexSettings.containsKey(setting.getKey())) { throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); } indexSettings.put(setting.getKey(), setting); - break; + } + } else { + throw new IllegalArgumentException("No scope found for setting [" + setting.getKey() + "]"); } } @@ -101,21 +107,15 @@ public class SettingsModule extends AbstractModule { settingsFilterPattern.add(filter); } - public void registerSettingsFilterIfMissing(String filter) { - if (settingsFilterPattern.contains(filter) == false) { - registerSettingsFilter(filter); - } - } - /** * Check if a setting has already been registered */ public boolean exists(Setting setting) { - switch (setting.getScope()) { - case CLUSTER: - return clusterSettings.containsKey(setting.getKey()); - case INDEX: - return indexSettings.containsKey(setting.getKey()); + if (setting.hasNodeScope()) { + return nodeSettings.containsKey(setting.getKey()); + } + if (setting.hasIndexScope()) { + return indexSettings.containsKey(setting.getKey()); } throw new IllegalArgumentException("setting scope is unknown. 
This should never happen!"); } diff --git a/core/src/main/java/org/elasticsearch/common/util/BigArrays.java b/core/src/main/java/org/elasticsearch/common/util/BigArrays.java index faa377baccd..dfef49ca9d4 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -41,9 +41,9 @@ public class BigArrays { /** Page size in bytes: 16KB */ public static final int PAGE_SIZE_IN_BYTES = 1 << 14; - public static final int BYTE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_BYTE; - public static final int INT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_INT; - public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_LONG; + public static final int BYTE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES; + public static final int INT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / Integer.BYTES; + public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / Long.BYTES; public static final int OBJECT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_OBJECT_REF; /** Returns the next size to grow when working with parallel arrays that may have different page sizes or number of bytes per element. */ @@ -490,7 +490,7 @@ public class BigArrays { if (minSize <= array.size()) { return array; } - final long newSize = overSize(minSize, BYTE_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_BYTE); + final long newSize = overSize(minSize, BYTE_PAGE_SIZE, 1); return resize(array, newSize); } @@ -573,7 +573,7 @@ public class BigArrays { if (minSize <= array.size()) { return array; } - final long newSize = overSize(minSize, INT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_INT); + final long newSize = overSize(minSize, INT_PAGE_SIZE, Integer.BYTES); return resize(array, newSize); } @@ -623,7 +623,7 @@ public class BigArrays { if (minSize <= array.size()) { return array; } - final long newSize = overSize(minSize, LONG_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_LONG); + final long newSize = overSize(minSize, LONG_PAGE_SIZE, Long.BYTES); return resize(array, newSize); } @@ -670,7 +670,7 @@ public class BigArrays { if (minSize <= array.size()) { return array; } - final long newSize = overSize(minSize, LONG_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_LONG); + final long newSize = overSize(minSize, LONG_PAGE_SIZE, Long.BYTES); return resize(array, newSize); } @@ -717,7 +717,7 @@ public class BigArrays { if (minSize <= array.size()) { return array; } - final long newSize = overSize(minSize, INT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_FLOAT); + final long newSize = overSize(minSize, INT_PAGE_SIZE, Float.BYTES); return resize(array, newSize); } diff --git a/core/src/main/java/org/elasticsearch/common/util/BigByteArray.java b/core/src/main/java/org/elasticsearch/common/util/BigByteArray.java index da4bc28408d..cac3132385f 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigByteArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigByteArray.java @@ -127,7 +127,7 @@ final class BigByteArray extends AbstractBigArray implements ByteArray { @Override protected int numBytesPerElement() { - return RamUsageEstimator.NUM_BYTES_BYTE; + return 1; } /** Change the size of this array. Content between indexes 0 and min(size(), newSize) will be preserved. 
*/ diff --git a/core/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java b/core/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java index 1f739188377..4aab593affe 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java @@ -71,7 +71,7 @@ final class BigDoubleArray extends AbstractBigArray implements DoubleArray { @Override protected int numBytesPerElement() { - return RamUsageEstimator.NUM_BYTES_INT; + return Integer.BYTES; } /** Change the size of this array. Content between indexes 0 and min(size(), newSize) will be preserved. */ diff --git a/core/src/main/java/org/elasticsearch/common/util/BigFloatArray.java b/core/src/main/java/org/elasticsearch/common/util/BigFloatArray.java index f6fc2d8fce0..1fa79a9f3db 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigFloatArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigFloatArray.java @@ -71,7 +71,7 @@ final class BigFloatArray extends AbstractBigArray implements FloatArray { @Override protected int numBytesPerElement() { - return RamUsageEstimator.NUM_BYTES_FLOAT; + return Float.BYTES; } /** Change the size of this array. Content between indexes 0 and min(size(), newSize) will be preserved. */ diff --git a/core/src/main/java/org/elasticsearch/common/util/BigIntArray.java b/core/src/main/java/org/elasticsearch/common/util/BigIntArray.java index 1c0e9fe017c..4ce5fc7acee 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigIntArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigIntArray.java @@ -88,7 +88,7 @@ final class BigIntArray extends AbstractBigArray implements IntArray { @Override protected int numBytesPerElement() { - return RamUsageEstimator.NUM_BYTES_INT; + return Integer.BYTES; } /** Change the size of this array. Content between indexes 0 and min(size(), newSize) will be preserved. */ diff --git a/core/src/main/java/org/elasticsearch/common/util/BigLongArray.java b/core/src/main/java/org/elasticsearch/common/util/BigLongArray.java index fe0323ba67c..2e3248143b4 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigLongArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigLongArray.java @@ -70,7 +70,7 @@ final class BigLongArray extends AbstractBigArray implements LongArray { @Override protected int numBytesPerElement() { - return RamUsageEstimator.NUM_BYTES_LONG; + return Long.BYTES; } /** Change the size of this array. Content between indexes 0 and min(size(), newSize) will be preserved. */ diff --git a/core/src/main/java/org/elasticsearch/common/util/BigObjectArray.java b/core/src/main/java/org/elasticsearch/common/util/BigObjectArray.java index ab923a195ca..19a41d3096d 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BigObjectArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/BigObjectArray.java @@ -65,7 +65,7 @@ final class BigObjectArray extends AbstractBigArray implements ObjectArray @Override protected int numBytesPerElement() { - return RamUsageEstimator.NUM_BYTES_INT; + return Integer.BYTES; } /** Change the size of this array. Content between indexes 0 and min(size(), newSize) will be preserved. 
*/ diff --git a/core/src/main/java/org/elasticsearch/common/util/BloomFilter.java b/core/src/main/java/org/elasticsearch/common/util/BloomFilter.java index fdc94d53849..b9dd6859ce0 100644 --- a/core/src/main/java/org/elasticsearch/common/util/BloomFilter.java +++ b/core/src/main/java/org/elasticsearch/common/util/BloomFilter.java @@ -388,7 +388,7 @@ public class BloomFilter { } public long ramBytesUsed() { - return RamUsageEstimator.NUM_BYTES_LONG * data.length + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 16; + return Long.BYTES * data.length + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 16; } } diff --git a/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index 739677342f7..36e0b19c782 100644 --- a/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -333,7 +333,7 @@ public class CollectionUtils { assert indices.length >= numValues; if (numValues > 1) { new InPlaceMergeSorter() { - final Comparator comparator = BytesRef.getUTF8SortedAsUnicodeComparator(); + final Comparator comparator = Comparator.naturalOrder(); @Override protected int compare(int i, int j) { return comparator.compare(bytes.get(scratch, indices[i]), bytes.get(scratch1, indices[j])); diff --git a/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java new file mode 100644 index 00000000000..3640d3e4bec --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.util; + +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.gateway.MetaDataStateFormat; +import org.elasticsearch.gateway.MetaStateService; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; + +/** + * Renames index folders from {index.name} to {index.uuid} + */ +public class IndexFolderUpgrader { + private final NodeEnvironment nodeEnv; + private final Settings settings; + private final ESLogger logger = Loggers.getLogger(IndexFolderUpgrader.class); + + /** + * Creates a new upgrader instance + * @param settings node settings + * @param nodeEnv the node env to operate on + */ + IndexFolderUpgrader(Settings settings, NodeEnvironment nodeEnv) { + this.settings = settings; + this.nodeEnv = nodeEnv; + } + + /** + * Moves the index folder found in source to target + */ + void upgrade(final Index index, final Path source, final Path target) throws IOException { + boolean success = false; + try { + Files.move(source, target, StandardCopyOption.ATOMIC_MOVE); + success = true; + } catch (NoSuchFileException | FileNotFoundException exception) { + // thrown when the source is non-existent because the folder was renamed + // by another node (shared FS) after we checked if the target exists + logger.error("multiple nodes trying to upgrade [{}] in parallel, retry upgrading with a single node", + exception, target); + throw exception; + } finally { + if (success) { + logger.info("{} moved from [{}] to [{}]", index, source, target); + logger.trace("{} syncing directory [{}]", index, target); + IOUtils.fsync(target, true); + } + } + } + + /** + * Renames indexFolderName index folders found in node paths and the custom path + * iff {@link #needsUpgrade(Index, String)} is true. + * Index folders in custom paths are renamed first, followed by index folders in each node path. + */ + void upgrade(final String indexFolderName) throws IOException { + for (NodeEnvironment.NodePath nodePath : nodeEnv.nodePaths()) { + final Path indexFolderPath = nodePath.indicesPath.resolve(indexFolderName); + final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, indexFolderPath); + if (indexMetaData != null) { + final Index index = indexMetaData.getIndex(); + if (needsUpgrade(index, indexFolderName)) { + logger.info("{} upgrading [{}] to new naming convention", index, indexFolderPath); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + if (indexSettings.hasCustomDataPath()) { + // we rename the index folder in the custom path before renaming it in any node path + // to have the index state under a not-yet-upgraded index folder, which we use to + // continue renaming after an incomplete upgrade.
+ final Path customLocationSource = nodeEnv.resolveBaseCustomLocation(indexSettings) + .resolve(indexFolderName); + final Path customLocationTarget = customLocationSource.resolveSibling(index.getUUID()); + // we rename the folder in the custom path only the first time we encounter a state + // in a node path that needs upgrading; this is a no-op for subsequent node paths + if (Files.exists(customLocationSource) // might not exist if no data was written for this index + && Files.exists(customLocationTarget) == false) { + upgrade(index, customLocationSource, customLocationTarget); + } else { + logger.info("[{}] no upgrade needed - already upgraded", customLocationTarget); + } + } + upgrade(index, indexFolderPath, indexFolderPath.resolveSibling(index.getUUID())); + } else { + logger.debug("[{}] no upgrade needed - already upgraded", indexFolderPath); + } + } else { + logger.warn("[{}] no index state found - ignoring", indexFolderPath); + } + } + } + + /** + * Upgrades all indices found under nodeEnv. Already upgraded indices are ignored. + */ + public static void upgradeIndicesIfNeeded(final Settings settings, final NodeEnvironment nodeEnv) throws IOException { + final IndexFolderUpgrader upgrader = new IndexFolderUpgrader(settings, nodeEnv); + for (String indexFolderName : nodeEnv.availableIndexFolders()) { + upgrader.upgrade(indexFolderName); + } + } + + static boolean needsUpgrade(Index index, String indexFolderName) { + return indexFolderName.equals(index.getUUID()) == false; + } +} diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index 10b1412425c..df1288d4fd2 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.util.concurrent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.Arrays; @@ -41,7 +42,8 @@ public class EsExecutors { /** * Settings key to manually set the number of available processors. * This is used to adjust thread pools sizes etc. per node. */ - public static final Setting PROCESSORS_SETTING = Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, false, Setting.Scope.CLUSTER) ; + public static final Setting PROCESSORS_SETTING = + Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, Property.NodeScope); /** * Returns the number of processors available but at most 32.
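Note: the hunk above shows the pattern this change applies across the codebase: every Setting that used to be declared with a trailing (boolean dynamic, Setting.Scope scope) pair is now declared with Setting.Property varargs (NodeScope, IndexScope, Dynamic, Filtered), as in PROCESSORS_SETTING. A minimal sketch of declaring and reading a setting against the new API; the setting key and values are hypothetical, and the snippet assumes the Elasticsearch core classes from this change are on the classpath:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;
    import org.elasticsearch.common.settings.Settings;

    public class PropertySettingExample {
        // hypothetical setting: node-scoped and dynamically updatable,
        // replacing the old trailing arguments (true, Setting.Scope.CLUSTER)
        static final Setting<Boolean> EXAMPLE_ENABLED =
                Setting.boolSetting("example.enabled", true, Property.Dynamic, Property.NodeScope);

        public static void main(String[] args) {
            Settings settings = Settings.settingsBuilder().put("example.enabled", "false").build();
            // get() parses the raw value, here via Booleans::parseBooleanExact
            boolean enabled = EXAMPLE_ENABLED.get(settings);
            System.out.println(enabled); // prints: false
        }
    }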
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java index fde8d828295..2f664679bb4 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java @@ -40,11 +40,14 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor { */ private final String name; - EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue workQueue, ThreadFactory threadFactory, ThreadContext contextHolder) { + EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, + BlockingQueue workQueue, ThreadFactory threadFactory, ThreadContext contextHolder) { this(name, corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, new EsAbortPolicy(), contextHolder); } - EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue workQueue, ThreadFactory threadFactory, XRejectedExecutionHandler handler, ThreadContext contextHolder) { + EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, + BlockingQueue workQueue, ThreadFactory threadFactory, XRejectedExecutionHandler handler, + ThreadContext contextHolder) { super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler); this.name = name; this.contextHolder = contextHolder; @@ -133,112 +136,10 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor { } protected Runnable wrapRunnable(Runnable command) { - final Runnable wrappedCommand; - if (command instanceof AbstractRunnable) { - wrappedCommand = new FilterAbstractRunnable(contextHolder, (AbstractRunnable) command); - } else { - wrappedCommand = new FilterRunnable(contextHolder, command); - } - return wrappedCommand; + return contextHolder.preserveContext(command); } protected Runnable unwrap(Runnable runnable) { - if (runnable instanceof FilterAbstractRunnable) { - return ((FilterAbstractRunnable) runnable).in; - } else if (runnable instanceof FilterRunnable) { - return ((FilterRunnable) runnable).in; - } - return runnable; + return contextHolder.unwrap(runnable); } - - private class FilterAbstractRunnable extends AbstractRunnable { - private final ThreadContext contextHolder; - private final AbstractRunnable in; - private final ThreadContext.StoredContext ctx; - - FilterAbstractRunnable(ThreadContext contextHolder, AbstractRunnable in) { - this.contextHolder = contextHolder; - ctx = contextHolder.newStoredContext(); - this.in = in; - } - - @Override - public boolean isForceExecution() { - return in.isForceExecution(); - } - - @Override - public void onAfter() { - in.onAfter(); - } - - @Override - public void onFailure(Throwable t) { - in.onFailure(t); - } - - @Override - public void onRejection(Throwable t) { - in.onRejection(t); - } - - @Override - protected void doRun() throws Exception { - boolean whileRunning = false; - try (ThreadContext.StoredContext ingore = contextHolder.stashContext()){ - ctx.restore(); - whileRunning = true; - in.doRun(); - whileRunning = false; - } catch (IllegalStateException ex) { - if (whileRunning || isShutdown() == false) { - throw ex; - } - // if we hit an ISE here we have been shutting down - // this comes from the threadcontext and barfs if 
- // our threadpool has been shutting down - } - } - - @Override - public String toString() { - return in.toString(); - } - - } - - private class FilterRunnable implements Runnable { - private final ThreadContext contextHolder; - private final Runnable in; - private final ThreadContext.StoredContext ctx; - - FilterRunnable(ThreadContext contextHolder, Runnable in) { - this.contextHolder = contextHolder; - ctx = contextHolder.newStoredContext(); - this.in = in; - } - - @Override - public void run() { - boolean whileRunning = false; - try (ThreadContext.StoredContext ingore = contextHolder.stashContext()){ - ctx.restore(); - whileRunning = true; - in.run(); - whileRunning = false; - } catch (IllegalStateException ex) { - if (whileRunning || isShutdown() == false) { - throw ex; - } - // if we hit an ISE here we have been shutting down - // this comes from the threadcontext and barfs if - // our threadpool has been shutting down - } - } - @Override - public String toString() { - return in.toString(); - } - } - } diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java index 83bb9fd690d..5c30330c156 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java @@ -20,7 +20,10 @@ package org.elasticsearch.common.util.concurrent; +import org.elasticsearch.common.lease.Releasable; + import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantLock; @@ -29,9 +32,8 @@ import java.util.concurrent.locks.ReentrantLock; * created the first time they are acquired and removed if no thread hold the * lock. The latter is important to assure that the list of locks does not grow * infinitely. - * - * A Thread can acquire a lock only once. 
- * + * + * * */ public class KeyedLock { @@ -50,48 +52,38 @@ public class KeyedLock { private final ConcurrentMap map = ConcurrentCollections.newConcurrentMap(); - protected final ThreadLocal threadLocal = new ThreadLocal<>(); - - public void acquire(T key) { + public Releasable acquire(T key) { + assert isHeldByCurrentThread(key) == false : "lock for " + key + " is already heald by this thread"; while (true) { - if (threadLocal.get() != null) { - // if we are here, the thread already has the lock - throw new IllegalStateException("Lock already acquired in Thread" + Thread.currentThread().getId() - + " for key " + key); - } KeyLock perNodeLock = map.get(key); if (perNodeLock == null) { KeyLock newLock = new KeyLock(fair); perNodeLock = map.putIfAbsent(key, newLock); if (perNodeLock == null) { newLock.lock(); - threadLocal.set(newLock); - return; + return new ReleasableLock(key, newLock); } } assert perNodeLock != null; int i = perNodeLock.count.get(); if (i > 0 && perNodeLock.count.compareAndSet(i, i + 1)) { perNodeLock.lock(); - threadLocal.set(perNodeLock); - return; + return new ReleasableLock(key, perNodeLock); } } } - public void release(T key) { - KeyLock lock = threadLocal.get(); + public boolean isHeldByCurrentThread(T key) { + KeyLock lock = map.get(key); if (lock == null) { - throw new IllegalStateException("Lock not acquired"); + return false; } - release(key, lock); + return lock.isHeldByCurrentThread(); } void release(T key, KeyLock lock) { - assert lock.isHeldByCurrentThread(); assert lock == map.get(key); lock.unlock(); - threadLocal.set(null); int decrementAndGet = lock.count.decrementAndGet(); if (decrementAndGet == 0) { map.remove(key, lock); @@ -99,6 +91,24 @@ public class KeyedLock { } + private final class ReleasableLock implements Releasable { + final T key; + final KeyLock lock; + final AtomicBoolean closed = new AtomicBoolean(); + + private ReleasableLock(T key, KeyLock lock) { + this.key = key; + this.lock = lock; + } + + @Override + public void close() { + if (closed.compareAndSet(false, true)) { + release(key, lock); + } + } + } + @SuppressWarnings("serial") private final static class KeyLock extends ReentrantLock { KeyLock(boolean fair) { diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 1928392fe41..462b4f539dc 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -19,11 +19,11 @@ package org.elasticsearch.common.util.concurrent; import org.apache.lucene.util.CloseableThreadLocal; -import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.io.Closeable; @@ -63,7 +63,7 @@ import java.util.concurrent.atomic.AtomicBoolean; public final class ThreadContext implements Closeable, Writeable{ public static final String PREFIX = "request.headers"; - public static final Setting DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", false, Setting.Scope.CLUSTER); + public static final Setting DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", Property.NodeScope); private final Map 
defaultHeader; private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(Collections.emptyMap()); private final ContextThreadLocal threadLocal; @@ -200,6 +200,36 @@ public final class ThreadContext implements Closeable, Writeablecommand has already been passed through this method then it is returned unaltered rather than wrapped twice. + */ + public Runnable preserveContext(Runnable command) { + if (command instanceof ContextPreservingAbstractRunnable) { + return command; + } + if (command instanceof ContextPreservingRunnable) { + return command; + } + if (command instanceof AbstractRunnable) { + return new ContextPreservingAbstractRunnable((AbstractRunnable) command); + } + return new ContextPreservingRunnable(command); + } + + /** + * Unwraps a command that was previously wrapped by {@link #preserveContext(Runnable)}. + */ + public Runnable unwrap(Runnable command) { + if (command instanceof ContextPreservingAbstractRunnable) { + return ((ContextPreservingAbstractRunnable) command).unwrap(); + } + if (command instanceof ContextPreservingRunnable) { + return ((ContextPreservingRunnable) command).unwrap(); + } + return command; + } + public interface StoredContext extends AutoCloseable { @Override void close(); @@ -356,4 +386,104 @@ public final class ThreadContext implements Closeable, Writeable readList(XContentParser parser, MapFactory mapFactory) throws IOException { XContentParser.Token token = parser.currentToken(); + if (token == null) { + token = parser.nextToken(); + } if (token == XContentParser.Token.FIELD_NAME) { token = parser.nextToken(); } if (token == XContentParser.Token.START_ARRAY) { token = parser.nextToken(); + } else { + throw new ElasticsearchParseException("Failed to parse list: expecting " + + XContentParser.Token.START_ARRAY + " but got " + token); } + ArrayList list = new ArrayList<>(); - for (; token != XContentParser.Token.END_ARRAY; token = parser.nextToken()) { + for (; token != null && token != XContentParser.Token.END_ARRAY; token = parser.nextToken()) { list.add(readValue(parser, mapFactory, token)); } return list; diff --git a/core/src/main/java/org/elasticsearch/discovery/Discovery.java b/core/src/main/java/org/elasticsearch/discovery/Discovery.java index b96417381ff..778e2d15053 100644 --- a/core/src/main/java/org/elasticsearch/discovery/Discovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/Discovery.java @@ -26,7 +26,6 @@ import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.node.service.NodeService; import java.io.IOException; @@ -41,11 +40,6 @@ public interface Discovery extends LifecycleComponent { String nodeDescription(); - /** - * Here as a hack to solve dep injection problem... - */ - void setNodeService(@Nullable NodeService nodeService); - /** * Another hack to solve dep injection problem..., note, this will be called before * any start is called. 
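Note: with the KeyedLock change above, acquire(key) now returns a Releasable instead of requiring a matching release(key) call, so callers can hold the per-key lock in a try-with-resources block. A minimal usage sketch, assuming KeyedLock also offers a no-argument (non-fair) constructor; the key string is hypothetical:

    import org.elasticsearch.common.lease.Releasable;
    import org.elasticsearch.common.util.concurrent.KeyedLock;

    public class KeyedLockExample {
        public static void main(String[] args) {
            KeyedLock<String> locks = new KeyedLock<>(); // assumed no-arg constructor
            try (Releasable ignored = locks.acquire("shard-0")) {
                // the lock for this key is held for the duration of the block
                assert locks.isHeldByCurrentThread("shard-0");
            }
            // close() releases the lock; the map entry is removed once unused
        }
    }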
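Note: the new ThreadContext.preserveContext(Runnable)/unwrap(Runnable) pair above replaces the FilterRunnable/FilterAbstractRunnable wrappers deleted from EsThreadPoolExecutor. A sketch of the intended round-trip; it assumes ThreadContext can be constructed from an empty Settings object and that putHeader/getHeader behave as their names suggest, and the header name and value are hypothetical:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.util.concurrent.ThreadContext;

    public class PreserveContextExample {
        public static void main(String[] args) {
            ThreadContext context = new ThreadContext(Settings.EMPTY); // assumed constructor
            context.putHeader("X-Request-Id", "42"); // hypothetical request header

            Runnable task = () -> System.out.println(context.getHeader("X-Request-Id"));
            Runnable wrapped = context.preserveContext(task);

            // wrapping an already-wrapped command returns it unaltered
            assert context.preserveContext(wrapped) == wrapped;
            // unwrap() recovers the original command, e.g. for executor queue accounting
            assert context.unwrap(wrapped) == task;
        }
    }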
diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index e08757a3f2a..4076b880d6f 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.multibindings.Multibinder; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.discovery.local.LocalDiscovery; @@ -45,10 +46,11 @@ import java.util.function.Function; */ public class DiscoveryModule extends AbstractModule { - public static final Setting DISCOVERY_TYPE_SETTING = new Setting<>("discovery.type", - settings -> DiscoveryNode.localNode(settings) ? "local" : "zen", Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting ZEN_MASTER_SERVICE_TYPE_SETTING = new Setting<>("discovery.zen.masterservice.type", - "zen", Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting DISCOVERY_TYPE_SETTING = + new Setting<>("discovery.type", settings -> DiscoveryNode.localNode(settings) ? "local" : "zen", Function.identity(), + Property.NodeScope); + public static final Setting ZEN_MASTER_SERVICE_TYPE_SETTING = + new Setting<>("discovery.zen.masterservice.type", "zen", Function.identity(), Property.NodeScope); private final Settings settings; private final Map>> unicastHostProviders = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index b899f0a8afc..ca7ab342cd5 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.rest.RestStatus; @@ -42,16 +43,25 @@ public class DiscoverySettings extends AbstractComponent { * sets the timeout for a complete publishing cycle, including both sending and committing. the master * will continue to process the next cluster state update after this time has elapsed **/ - public static final Setting PUBLISH_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); + public static final Setting PUBLISH_TIMEOUT_SETTING = + Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), + Property.Dynamic, Property.NodeScope); /** * sets the timeout for receiving enough acks for a specific cluster state and committing it. failing * to receive responses within this window will cause the cluster state change to be rejected. 
*/ - public static final Setting COMMIT_TIMEOUT_SETTING = new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), true, Setting.Scope.CLUSTER); - public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.CLUSTER); - public static final Setting PUBLISH_DIFF_ENABLE_SETTING = Setting.boolSetting("discovery.zen.publish_diff.enable", true, true, Setting.Scope.CLUSTER); - public static final Setting INITIAL_STATE_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), false, Setting.Scope.CLUSTER); + public static final Setting COMMIT_TIMEOUT_SETTING = + new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), + (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), + Property.Dynamic, Property.NodeScope); + public static final Setting NO_MASTER_BLOCK_SETTING = + new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, + Property.Dynamic, Property.NodeScope); + public static final Setting PUBLISH_DIFF_ENABLE_SETTING = + Setting.boolSetting("discovery.zen.publish_diff.enable", true, Property.Dynamic, Property.NodeScope); + public static final Setting INITIAL_STATE_TIMEOUT_SETTING = + Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), Property.NodeScope); private volatile ClusterBlock noMasterBlock; private volatile TimeValue publishTimeout; diff --git a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java index 661de5260c1..cf697871d35 100644 --- a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java @@ -21,7 +21,6 @@ package org.elasticsearch.discovery.local; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.Diff; @@ -31,9 +30,9 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.ClusterSettings; @@ -45,7 +44,6 @@ import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.DiscoveryStats; -import org.elasticsearch.node.service.NodeService; import java.util.HashSet; import java.util.Queue; @@ -84,11 +82,6 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem this.discoverySettings = new 
DiscoverySettings(settings, clusterSettings); } - @Override - public void setNodeService(@Nullable NodeService nodeService) { - // nothing to do here - } - @Override public void setRoutingService(RoutingService routingService) { this.routingService = routingService; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java b/core/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java index f845cbe1fed..b9ce7901369 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/DiscoveryNodesProvider.java @@ -20,8 +20,6 @@ package org.elasticsearch.discovery.zen; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.node.service.NodeService; /** * @@ -30,6 +28,4 @@ public interface DiscoveryNodesProvider { DiscoveryNodes nodes(); - @Nullable - NodeService nodeService(); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index 19a2cf06bf4..0edbf8841ad 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.zen; import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.NotMasterException; @@ -28,7 +27,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; @@ -346,7 +345,7 @@ public class NodeJoinController extends AbstractComponent { } private void assertClusterStateThread() { - assert clusterService instanceof InternalClusterService == false || ((InternalClusterService) clusterService).assertClusterStateThread(); + assert clusterService instanceof ClusterService == false || ((ClusterService) clusterService).assertClusterStateThread(); } } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index ab5d61e7aa0..dabe1257dcb 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -24,7 +24,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.NotMasterException; @@ -35,7 +34,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingService; import 
org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.service.InternalClusterService;
+import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.component.Lifecycle;
@@ -46,6 +45,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
@@ -60,7 +60,6 @@ import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats;
import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
-import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
@@ -87,17 +86,28 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
*/
public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery, PingContextProvider {
- public final static Setting<TimeValue> PING_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), false, Setting.Scope.CLUSTER);
- public final static Setting<TimeValue> JOIN_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.join_timeout",
- settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
- public final static Setting<Integer> JOIN_RETRY_ATTEMPTS_SETTING = Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, false, Setting.Scope.CLUSTER);
- public final static Setting<TimeValue> JOIN_RETRY_DELAY_SETTING = Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), false, Setting.Scope.CLUSTER);
- public final static Setting<Integer> MAX_PINGS_FROM_ANOTHER_MASTER_SETTING = Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, false, Setting.Scope.CLUSTER);
- public final static Setting<Boolean> SEND_LEAVE_REQUEST_SETTING = Setting.boolSetting("discovery.zen.send_leave_request", true, false, Setting.Scope.CLUSTER);
- public final static Setting<Boolean> MASTER_ELECTION_FILTER_CLIENT_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_client", true, false, Setting.Scope.CLUSTER);
- public final static Setting<TimeValue> MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout",
- settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
- public final static Setting<Boolean> MASTER_ELECTION_FILTER_DATA_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_data", false, false, Setting.Scope.CLUSTER);
+ public final static Setting<TimeValue> PING_TIMEOUT_SETTING =
+ Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), Property.NodeScope);
+ public final static Setting<TimeValue> JOIN_TIMEOUT_SETTING =
+ Setting.timeSetting("discovery.zen.join_timeout",
+ settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(),
+ TimeValue.timeValueMillis(0), Property.NodeScope);
+ public final static Setting<Integer> JOIN_RETRY_ATTEMPTS_SETTING =
+ Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, Property.NodeScope);
+ public final static Setting<TimeValue> JOIN_RETRY_DELAY_SETTING =
+ Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), Property.NodeScope);
+ public final static Setting<Integer> MAX_PINGS_FROM_ANOTHER_MASTER_SETTING =
+ Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, Property.NodeScope);
+ public final static Setting<Boolean> SEND_LEAVE_REQUEST_SETTING =
+ Setting.boolSetting("discovery.zen.send_leave_request", true, Property.NodeScope);
+ public final static Setting<Boolean> MASTER_ELECTION_FILTER_CLIENT_SETTING =
+ Setting.boolSetting("discovery.zen.master_election.filter_client", true, Property.NodeScope);
+ public final static Setting<TimeValue> MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING =
+ Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout",
+ settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0),
+ Property.NodeScope);
+ public final static Setting<Boolean> MASTER_ELECTION_FILTER_DATA_SETTING =
+ Setting.boolSetting("discovery.zen.master_election.filter_data", false, Property.NodeScope);
public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin";
@@ -137,10 +147,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
/** counts the time this node has joined the cluster or have elected it self as master */
private final AtomicLong clusterJoinsCounter = new AtomicLong();
- @Nullable
- private NodeService nodeService;
-
- // must initialized in doStart(), when we have the routingService set
private volatile NodeJoinController nodeJoinController;
@@ -192,11 +198,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
transportService.registerRequestHandler(DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest::new, ThreadPool.Names.SAME, new RejoinClusterRequestHandler());
}
- @Override
- public void setNodeService(@Nullable NodeService nodeService) {
- this.nodeService = nodeService;
- }
-
@Override
public void setRoutingService(RoutingService routingService) {
this.routingService = routingService;
@@ -292,11 +293,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return clusterService.state().nodes();
}
- @Override
- public NodeService nodeService() {
- return this.nodeService;
- }
-
@Override
public boolean nodeHasJoinedClusterOnce() {
return clusterJoinsCounter.get() > 0;
@@ -838,7 +834,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return null;
}
if (logger.isTraceEnabled()) {
- StringBuilder sb = new StringBuilder("full ping responses:");
+ StringBuilder sb = new StringBuilder();
if (fullPingResponses.length == 0) {
sb.append(" {none}");
} else {
@@ -846,7 +842,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
sb.append("\n\t--> ").append(pingResponse);
}
}
- logger.trace(sb.toString());
+ logger.trace("full ping responses:{}", sb);
}
// filter responses
@@ -864,7 +860,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
}
if (logger.isDebugEnabled()) {
- StringBuilder sb = new StringBuilder("filtered ping responses: (filter_client[").append(masterElectionFilterClientNodes).append("], filter_data[").append(masterElectionFilterDataNodes).append("])");
+ StringBuilder sb = new StringBuilder();
if (pingResponses.isEmpty()) {
sb.append(" {none}");
} else {
@@ -872,7 +868,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
sb.append("\n\t--> ").append(pingResponse);
}
}
- logger.debug(sb.toString());
+ logger.debug("filtered ping responses: (filter_client[{}], filter_data[{}]){}", masterElectionFilterClientNodes,
+ masterElectionFilterDataNodes, sb);
}
final DiscoveryNode localNode = clusterService.localNode();
@@ -932,9 +929,9 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
protected ClusterState rejoin(ClusterState clusterState, String reason) {
// *** called from within an cluster state update task *** //
- assert Thread.currentThread().getName().contains(InternalClusterService.UPDATE_THREAD_NAME);
+ assert Thread.currentThread().getName().contains(ClusterService.UPDATE_THREAD_NAME);
- logger.warn(reason + ", current nodes: {}", clusterState.nodes());
+ logger.warn("{}, current nodes: {}", reason, clusterState.nodes());
nodesFD.stop();
masterFD.stop(reason);
@@ -962,7 +959,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
private ClusterState handleAnotherMaster(ClusterState localClusterState, final DiscoveryNode otherMaster, long otherClusterStateVersion, String reason) {
assert localClusterState.nodes().localNodeMaster() : "handleAnotherMaster called but current node is not a master";
- assert Thread.currentThread().getName().contains(InternalClusterService.UPDATE_THREAD_NAME) : "not called from the cluster state update thread";
+ assert Thread.currentThread().getName().contains(ClusterService.UPDATE_THREAD_NAME) : "not called from the cluster state update thread";
if (otherClusterStateVersion > localClusterState.version()) {
return rejoin(localClusterState, "zen-disco-discovered another master with a new cluster_state [" + otherMaster + "][" + reason + "]");
@@ -1200,7 +1197,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
}
private void assertClusterStateThread() {
- assert clusterService instanceof InternalClusterService == false || ((InternalClusterService) clusterService).assertClusterStateThread();
+ assert clusterService instanceof ClusterService == false || ((ClusterService) clusterService).assertClusterStateThread();
}
}
Setting.intSetting("discovery.zen.minimum_master_nodes", -1, Property.Dynamic, Property.NodeScope); // This is the minimum version a master needs to be on, otherwise it gets ignored // This is based on the minimum compatible version of the current version this node is on diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java index 62b0250315c..1cfd46634a5 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java @@ -22,7 +22,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.threadpool.ThreadPool; @@ -37,11 +37,16 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; */ public abstract class FaultDetection extends AbstractComponent { - public static final Setting CONNECT_ON_NETWORK_DISCONNECT_SETTING = Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, false, Scope.CLUSTER); - public static final Setting PING_INTERVAL_SETTING = Setting.positiveTimeSetting("discovery.zen.fd.ping_interval", timeValueSeconds(1), false, Scope.CLUSTER); - public static final Setting PING_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.fd.ping_timeout", timeValueSeconds(30), false, Scope.CLUSTER); - public static final Setting PING_RETRIES_SETTING = Setting.intSetting("discovery.zen.fd.ping_retries", 3, false, Scope.CLUSTER); - public static final Setting REGISTER_CONNECTION_LISTENER_SETTING = Setting.boolSetting("discovery.zen.fd.register_connection_listener", true, false, Scope.CLUSTER); + public static final Setting CONNECT_ON_NETWORK_DISCONNECT_SETTING = + Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, Property.NodeScope); + public static final Setting PING_INTERVAL_SETTING = + Setting.positiveTimeSetting("discovery.zen.fd.ping_interval", timeValueSeconds(1), Property.NodeScope); + public static final Setting PING_TIMEOUT_SETTING = + Setting.timeSetting("discovery.zen.fd.ping_timeout", timeValueSeconds(30), Property.NodeScope); + public static final Setting PING_RETRIES_SETTING = + Setting.intSetting("discovery.zen.fd.ping_retries", 3, Property.NodeScope); + public static final Setting REGISTER_CONNECTION_LISTENER_SETTING = + Setting.boolSetting("discovery.zen.fd.register_connection_listener", true, Property.NodeScope); protected final ThreadPool threadPool; protected final ClusterName clusterName; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java index 73be1d3bb28..96ed7f76419 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java @@ -21,12 +21,12 @@ package org.elasticsearch.discovery.zen.fd; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import 
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java
index 73be1d3bb28..96ed7f76419 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java
@@ -21,12 +21,12 @@ package org.elasticsearch.discovery.zen.fd;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java
index 04af8207c37..de4caf664ea 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java
@@ -19,9 +19,9 @@
package org.elasticsearch.discovery.zen.membership;
-import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java
index 427abca8d85..0e9b81ad1fc 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java
@@ -32,6 +32,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
@@ -86,8 +87,11 @@ import static org.elasticsearch.discovery.zen.ping.ZenPing.PingResponse.readPing
public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implements ZenPing {
public static final String ACTION_NAME = "internal:discovery/zen/unicast";
- public static final Setting<List<String>> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = Setting.listSetting("discovery.zen.ping.unicast.hosts", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
- public static final Setting<Integer> DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, false, Setting.Scope.CLUSTER);
+ public static final Setting<List<String>> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING =
+ Setting.listSetting("discovery.zen.ping.unicast.hosts", Collections.emptyList(), Function.identity(),
+ Property.NodeScope);
+ public static final Setting<Integer> DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING =
+ Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, Property.NodeScope);
// these limits are per-address
public static final int LIMIT_FOREIGN_PORTS_COUNT = 1;
diff --git a/core/src/main/java/org/elasticsearch/env/Environment.java b/core/src/main/java/org/elasticsearch/env/Environment.java
index 1f8cffc97f3..e022ce6ad2f 100644
--- a/core/src/main/java/org/elasticsearch/env/Environment.java
+++ b/core/src/main/java/org/elasticsearch/env/Environment.java
@@ -23,6 +23,7 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import java.io.IOException;
@@ -46,15 +47,17 @@ import static org.elasticsearch.common.Strings.cleanPath;
// TODO: move PathUtils to be package-private here instead of
// public+forbidden api!
public class Environment {
- public static final Setting<String> PATH_HOME_SETTING = Setting.simpleString("path.home", false, Setting.Scope.CLUSTER);
- public static final Setting<String> PATH_CONF_SETTING = Setting.simpleString("path.conf", false, Setting.Scope.CLUSTER);
- public static final Setting<String> PATH_SCRIPTS_SETTING = Setting.simpleString("path.scripts", false, Setting.Scope.CLUSTER);
- public static final Setting<List<String>> PATH_DATA_SETTING = Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
- public static final Setting<String> PATH_LOGS_SETTING = Setting.simpleString("path.logs", false, Setting.Scope.CLUSTER);
- public static final Setting<String> PATH_PLUGINS_SETTING = Setting.simpleString("path.plugins", false, Setting.Scope.CLUSTER);
- public static final Setting<List<String>> PATH_REPO_SETTING = Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
- public static final Setting<String> PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", false, Setting.Scope.CLUSTER);
- public static final Setting<String> PIDFILE_SETTING = Setting.simpleString("pidfile", false, Setting.Scope.CLUSTER);
+ public static final Setting<String> PATH_HOME_SETTING = Setting.simpleString("path.home", Property.NodeScope);
+ public static final Setting<String> PATH_CONF_SETTING = Setting.simpleString("path.conf", Property.NodeScope);
+ public static final Setting<String> PATH_SCRIPTS_SETTING = Setting.simpleString("path.scripts", Property.NodeScope);
+ public static final Setting<List<String>> PATH_DATA_SETTING =
+ Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), Property.NodeScope);
+ public static final Setting<String> PATH_LOGS_SETTING = Setting.simpleString("path.logs", Property.NodeScope);
+ public static final Setting<String> PATH_PLUGINS_SETTING = Setting.simpleString("path.plugins", Property.NodeScope);
+ public static final Setting<List<String>> PATH_REPO_SETTING =
+ Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), Property.NodeScope);
+ public static final Setting<String> PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", Property.NodeScope);
+ public static final Setting<String> PIDFILE_SETTING = Setting.simpleString("pidfile", Property.NodeScope);
private final Settings settings;
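The `path.*` settings keep their keys and defaults; only the declaration style changes. For orientation, a small hedged sketch of how these typed settings are read back — `Settings.Builder#putArray` is assumed from this source tree, and `/opt/es` plus the mount points are made-up values:

```java
import java.util.List;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;

public class PathSettingsDemo {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
                .put("path.home", "/opt/es")
                .putArray("path.data", "/mnt/a", "/mnt/b")
                .build();
        // Typed access replaces raw string lookups:
        String home = Environment.PATH_HOME_SETTING.get(settings);
        List<String> dataPaths = Environment.PATH_DATA_SETTING.get(settings);
        System.out.println(home + " -> " + dataPaths); // /opt/es -> [/mnt/a, /mnt/b]
    }
}
```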
diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java
index 0eec5c5765e..c6eec09b1c8 100644
--- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java
+++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java
@@ -36,7 +36,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.common.settings.Setting.Scope;
+import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
@@ -49,7 +49,6 @@ import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.fs.FsProbe;
import org.elasticsearch.monitor.jvm.JvmInfo;
-import org.elasticsearch.monitor.process.ProcessProbe;
import java.io.Closeable;
import java.io.IOException;
@@ -71,7 +70,6 @@ import java.util.Set;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.stream.Collectors;
import static java.util.Collections.unmodifiableSet;
@@ -90,7 +88,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
* not running on Linux, or we hit an exception trying), True means the device possibly spins and False means it does not.
*/
public final Boolean spins;
- public NodePath(Path path, Environment environment) throws IOException {
+ public NodePath(Path path) throws IOException {
this.path = path;
this.indicesPath = path.resolve(INDICES_FOLDER);
this.fileStore = Environment.getFileStore(path);
@@ -103,16 +101,18 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
/**
* Resolves the given shards directory against this NodePath
+ * ${data.paths}/nodes/{node.id}/indices/{index.uuid}/{shard.id}
*/
public Path resolve(ShardId shardId) {
return resolve(shardId.getIndex()).resolve(Integer.toString(shardId.id()));
}
/**
- * Resolves the given indexes directory against this NodePath
+ * Resolves index directory against this NodePath
+ * ${data.paths}/nodes/{node.id}/indices/{index.uuid}
*/
public Path resolve(Index index) {
- return indicesPath.resolve(index.getName());
+ return indicesPath.resolve(index.getUUID());
}
@Override
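The javadoc added here pins down the new on-disk contract: shard data lives under the index UUID, not the index name. A plain-JDK sketch of that layout (the paths and UUID are invented):

```java
import java.nio.file.Path;
import java.nio.file.Paths;

public class IndexPathLayout {
    // ${data.paths}/nodes/{node.id}/indices/{index.uuid}/{shard.id}
    static Path shardPath(Path dataPath, int nodeId, String indexUUID, int shardId) {
        return dataPath.resolve("nodes").resolve(Integer.toString(nodeId))
                .resolve("indices").resolve(indexUUID)
                .resolve(Integer.toString(shardId));
    }

    public static void main(String[] args) {
        // Keying by UUID means deleting and re-creating an index with the same
        // name can never collide with leftover folders of the old incarnation.
        System.out.println(shardPath(Paths.get("/var/data/es"), 0, "Hp8nBcOnSh2DaSkBPpr7pg", 2));
        // /var/data/es/nodes/0/indices/Hp8nBcOnSh2DaSkBPpr7pg/2
    }
}
```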
@@ -132,25 +132,25 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
private final int localNodeId;
private final AtomicBoolean closed = new AtomicBoolean(false);
- private final Map<ShardLockKey, InternalShardLock> shardLocks = new HashMap<>();
+ private final Map<ShardId, InternalShardLock> shardLocks = new HashMap<>();
/**
* Maximum number of data nodes that should run in an environment.
*/
- public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 50, 1, false,
- Scope.CLUSTER);
+ public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 50, 1,
+ Property.NodeScope);
/**
* If true automatically append node id to custom data paths.
*/
- public static final Setting<Boolean> ADD_NODE_ID_TO_CUSTOM_PATH = Setting.boolSetting("node.add_id_to_custom_path", true, false,
- Scope.CLUSTER);
+ public static final Setting<Boolean> ADD_NODE_ID_TO_CUSTOM_PATH =
+ Setting.boolSetting("node.add_id_to_custom_path", true, Property.NodeScope);
/**
* If true the [verbose] SegmentInfos.infoStream logging is sent to System.out.
*/
- public static final Setting<Boolean> ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING = Setting
- .boolSetting("node.enable_lucene_segment_infos_trace", false, false, Scope.CLUSTER);
+ public static final Setting<Boolean> ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING =
+ Setting.boolSetting("node.enable_lucene_segment_infos_trace", false, Property.NodeScope);
public static final String NODES_FOLDER = "nodes";
public static final String INDICES_FOLDER = "indices";
@@ -187,7 +187,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
try {
locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME);
- nodePaths[dirIndex] = new NodePath(dir, environment);
+ nodePaths[dirIndex] = new NodePath(dir);
localNodeId = possibleLockId;
} catch (LockObtainFailedException ex) {
logger.trace("failed to obtain node lock on {}", dir.toAbsolutePath());
@@ -225,7 +225,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
maybeLogPathDetails();
maybeLogHeapDetails();
-
+ applySegmentInfosTrace(settings);
assertCanWrite();
success = true;
@@ -250,7 +250,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
// We do some I/O in here, so skip this if DEBUG/INFO are not enabled:
if (logger.isDebugEnabled()) {
// Log one line per path.data:
- StringBuilder sb = new StringBuilder("node data locations details:");
+ StringBuilder sb = new StringBuilder();
for (NodePath nodePath : nodePaths) {
sb.append('\n').append(" -> ").append(nodePath.path.toAbsolutePath());
@@ -278,7 +278,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
.append(fsPath.getType())
.append(']');
}
- logger.debug(sb.toString());
+ logger.debug("node data locations details:{}", sb);
} else if (logger.isInfoEnabled()) {
FsInfo.Path totFSPath = new FsInfo.Path();
Set<String> allTypes = new HashSet<>();
@@ -306,14 +306,8 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
}
// Just log a 1-line summary:
- logger.info(String.format(Locale.ROOT,
- "using [%d] data paths, mounts [%s], net usable_space [%s], net total_space [%s], spins? [%s], types [%s]",
- nodePaths.length,
- allMounts,
- totFSPath.getAvailable(),
- totFSPath.getTotal(),
- toString(allSpins),
- toString(allTypes)));
+ logger.info("using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], spins? [{}], types [{}]",
+ nodePaths.length, allMounts, totFSPath.getAvailable(), totFSPath.getTotal(), toString(allSpins), toString(allTypes));
}
}
@@ -452,11 +446,11 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
* @param indexSettings settings for the index being deleted
*/
public void deleteIndexDirectoryUnderLock(Index index, IndexSettings indexSettings) throws IOException {
- final Path[] indexPaths = indexPaths(index.getName());
+ final Path[] indexPaths = indexPaths(index);
logger.trace("deleting index {} directory, paths({}): [{}]", index, indexPaths.length, indexPaths);
IOUtils.rm(indexPaths);
if (indexSettings.hasCustomDataPath()) {
- Path customLocation = resolveCustomLocation(indexSettings, index.getName());
+ Path customLocation = resolveIndexCustomLocation(indexSettings);
logger.trace("deleting custom index {} directory [{}]", index, customLocation);
IOUtils.rm(customLocation);
}
@@ -524,17 +518,16 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
*/
public ShardLock shardLock(final ShardId shardId, long lockTimeoutMS) throws IOException {
logger.trace("acquiring node shardlock on [{}], timeout [{}]", shardId, lockTimeoutMS);
- final ShardLockKey shardLockKey = new ShardLockKey(shardId);
final InternalShardLock shardLock;
final boolean acquired;
synchronized (shardLocks) {
- if (shardLocks.containsKey(shardLockKey)) {
- shardLock = shardLocks.get(shardLockKey);
+ if (shardLocks.containsKey(shardId)) {
+ shardLock = shardLocks.get(shardId);
shardLock.incWaitCount();
acquired = false;
} else {
- shardLock = new InternalShardLock(shardLockKey);
- shardLocks.put(shardLockKey, shardLock);
+ shardLock = new InternalShardLock(shardId);
+ shardLocks.put(shardId, shardLock);
acquired = true;
}
}
@@ -554,7 +547,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
@Override
protected void closeInternal() {
shardLock.release();
- logger.trace("released shard lock for [{}]", shardLockKey);
+ logger.trace("released shard lock for [{}]", shardId);
}
};
}
@@ -566,51 +559,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
*/
public Set<ShardId> lockedShards() {
synchronized (shardLocks) {
- Set<ShardId> lockedShards = shardLocks.keySet().stream()
- .map(shardLockKey -> new ShardId(new Index(shardLockKey.indexName, "_na_"), shardLockKey.shardId)).collect(Collectors.toSet());
- return unmodifiableSet(lockedShards);
- }
- }
-
- // a key for the shard lock. we can't use shardIds, because the contain
- // the index uuid, but we want the lock semantics to the same as we map indices to disk folders, i.e., without the uuid (for now).
- private final class ShardLockKey {
- final String indexName;
- final int shardId;
-
- public ShardLockKey(final ShardId shardId) {
- this.indexName = shardId.getIndexName();
- this.shardId = shardId.id();
- }
-
- @Override
- public String toString() {
- return "[" + indexName + "][" + shardId + "]";
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
-
- ShardLockKey that = (ShardLockKey) o;
-
- if (shardId != that.shardId) {
- return false;
- }
- return indexName.equals(that.indexName);
-
- }
-
- @Override
- public int hashCode() {
- int result = indexName.hashCode();
- result = 31 * result + shardId;
- return result;
+ return unmodifiableSet(new HashSet<>(shardLocks.keySet()));
}
}
@@ -623,10 +572,10 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
*/
private final Semaphore mutex = new Semaphore(1);
private int waitCount = 1; // guarded by shardLocks
- private final ShardLockKey lockKey;
+ private final ShardId shardId;
- InternalShardLock(ShardLockKey id) {
- lockKey = id;
+ InternalShardLock(ShardId shardId) {
+ this.shardId = shardId;
mutex.acquireUninterruptibly();
}
@@ -646,10 +595,10 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
synchronized (shardLocks) {
assert waitCount > 0 : "waitCount is " + waitCount + " but should be > 0";
--waitCount;
- logger.trace("shard lock wait count for [{}] is now [{}]", lockKey, waitCount);
+ logger.trace("shard lock wait count for {} is now [{}]", shardId, waitCount);
if (waitCount == 0) {
- logger.trace("last shard lock wait decremented, removing lock for [{}]", lockKey);
- InternalShardLock remove = shardLocks.remove(lockKey);
+ logger.trace("last shard lock wait decremented, removing lock for {}", shardId);
+ InternalShardLock remove = shardLocks.remove(shardId);
assert remove != null : "Removed lock was null";
}
}
@@ -658,11 +607,11 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
void acquire(long timeoutInMillis) throws LockObtainFailedException{
try {
if (mutex.tryAcquire(timeoutInMillis, TimeUnit.MILLISECONDS) == false) {
- throw new LockObtainFailedException("Can't lock shard " + lockKey + ", timed out after " + timeoutInMillis + "ms");
+ throw new LockObtainFailedException("Can't lock shard " + shardId + ", timed out after " + timeoutInMillis + "ms");
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- throw new LockObtainFailedException("Can't lock shard " + lockKey + ", interrupted", e);
+ throw new LockObtainFailedException("Can't lock shard " + shardId + ", interrupted", e);
}
}
}
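With index folders keyed by UUID, a `ShardId` (which carries the UUID) can key the lock map directly, and the `ShardLockKey` wrapper disappears. A hedged usage sketch built only from the signatures visible in this diff (`shardLock(ShardId, long)` returning a closeable `ShardLock`); the surrounding method is hypothetical:

```java
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;

public class ShardLockDemo {
    void withShardFiles(NodeEnvironment nodeEnv, Index index) throws Exception {
        ShardId shardId = new ShardId(index, 0);
        // The lock is keyed by the ShardId itself now; wait up to 5s for a holder to release.
        try (ShardLock lock = nodeEnv.shardLock(shardId, 5000)) {
            // exclusive access to the shard's on-disk files while the lock is held
        } // closing the lock decrements the wait count and releases the mutex
    }
}
```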
@@ -705,11 +654,11 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
/**
* Returns all index paths.
*/
- public Path[] indexPaths(String indexName) {
+ public Path[] indexPaths(Index index) {
assert assertEnvIsLocked();
Path[] indexPaths = new Path[nodePaths.length];
for (int i = 0; i < nodePaths.length; i++) {
- indexPaths[i] = nodePaths[i].indicesPath.resolve(indexName);
+ indexPaths[i] = nodePaths[i].resolve(index);
}
return indexPaths;
}
@@ -732,25 +681,47 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
return shardLocations;
}
- public Set<String> findAllIndices() throws IOException {
+ /**
+ * Returns all folder names in ${data.paths}/nodes/{node.id}/indices folder
+ */
+ public Set<String> availableIndexFolders() throws IOException {
if (nodePaths == null || locks == null) {
throw new IllegalStateException("node is not configured to store local location");
}
assert assertEnvIsLocked();
- Set<String> indices = new HashSet<>();
+ Set<String> indexFolders = new HashSet<>();
for (NodePath nodePath : nodePaths) {
Path indicesLocation = nodePath.indicesPath;
if (Files.isDirectory(indicesLocation)) {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indicesLocation)) {
for (Path index : stream) {
if (Files.isDirectory(index)) {
- indices.add(index.getFileName().toString());
+ indexFolders.add(index.getFileName().toString());
}
}
}
}
}
- return indices;
+ return indexFolders;
+
+ }
+
+ /**
+ * Resolves all existing paths to indexFolderName in ${data.paths}/nodes/{node.id}/indices
+ */
+ public Path[] resolveIndexFolder(String indexFolderName) throws IOException {
+ if (nodePaths == null || locks == null) {
+ throw new IllegalStateException("node is not configured to store local location");
+ }
+ assert assertEnvIsLocked();
+ List<Path> paths = new ArrayList<>(nodePaths.length);
+ for (NodePath nodePath : nodePaths) {
+ Path indexFolder = nodePath.indicesPath.resolve(indexFolderName);
+ if (Files.exists(indexFolder)) {
+ paths.add(indexFolder);
+ }
+ }
+ return paths.toArray(new Path[paths.size()]);
}
/**
@@ -768,13 +739,13 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
}
assert assertEnvIsLocked();
final Set<ShardId> shardIds = new HashSet<>();
- String indexName = index.getName();
+ final String indexUniquePathId = index.getUUID();
for (final NodePath nodePath : nodePaths) {
Path location = nodePath.indicesPath;
if (Files.isDirectory(location)) {
try (DirectoryStream<Path> indexStream = Files.newDirectoryStream(location)) {
for (Path indexPath : indexStream) {
- if (indexName.equals(indexPath.getFileName().toString())) {
+ if (indexUniquePathId.equals(indexPath.getFileName().toString())) {
shardIds.addAll(findAllShardsForIndex(indexPath, index));
}
}
@@ -785,7 +756,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
}
private static Set<ShardId> findAllShardsForIndex(Path indexPath, Index index) throws IOException {
- assert indexPath.getFileName().toString().equals(index.getName());
+ assert indexPath.getFileName().toString().equals(index.getUUID());
Set<ShardId> shardIds = new HashSet<>();
if (Files.isDirectory(indexPath)) {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
@@ -868,7 +839,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
*
* @param indexSettings settings for the index
*/
- private Path resolveCustomLocation(IndexSettings indexSettings) {
+ public Path resolveBaseCustomLocation(IndexSettings indexSettings) {
String customDataDir = indexSettings.customDataPath();
if (customDataDir != null) {
// This assert is because this should be caught by MetaDataCreateIndexService
@@ -889,10 +860,9 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
* the root path for the index.
*
* @param indexSettings settings for the index
- * @param indexName index to resolve the path for
*/
- private Path resolveCustomLocation(IndexSettings indexSettings, final String indexName) {
- return resolveCustomLocation(indexSettings).resolve(indexName);
+ private Path resolveIndexCustomLocation(IndexSettings indexSettings) {
+ return resolveBaseCustomLocation(indexSettings).resolve(indexSettings.getUUID());
}
/**
@@ -904,7 +874,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
* @param shardId shard to resolve the path to
*/
public Path resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId) {
- return resolveCustomLocation(indexSettings, shardId.getIndexName()).resolve(Integer.toString(shardId.id()));
+ return resolveIndexCustomLocation(indexSettings).resolve(Integer.toString(shardId.id()));
}
/**
@@ -928,22 +898,24 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
for (Path path : nodeDataPaths()) { // check node-paths are writable
tryWriteTempFile(path);
}
- for (String index : this.findAllIndices()) {
- for (Path path : this.indexPaths(index)) { // check index paths are writable
- Path statePath = path.resolve(MetaDataStateFormat.STATE_DIR_NAME);
- tryWriteTempFile(statePath);
- tryWriteTempFile(path);
- }
- for (ShardId shardID : this.findAllShardIds(new Index(index, IndexMetaData.INDEX_UUID_NA_VALUE))) {
- Path[] paths = this.availableShardPaths(shardID);
- for (Path path : paths) { // check shard paths are writable
- Path indexDir = path.resolve(ShardPath.INDEX_FOLDER_NAME);
- Path statePath = path.resolve(MetaDataStateFormat.STATE_DIR_NAME);
- Path translogDir = path.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
- tryWriteTempFile(indexDir);
- tryWriteTempFile(translogDir);
- tryWriteTempFile(statePath);
- tryWriteTempFile(path);
+ for (String indexFolderName : this.availableIndexFolders()) {
+ for (Path indexPath : this.resolveIndexFolder(indexFolderName)) { // check index paths are writable
+ Path indexStatePath = indexPath.resolve(MetaDataStateFormat.STATE_DIR_NAME);
+ tryWriteTempFile(indexStatePath);
+ tryWriteTempFile(indexPath);
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
+ for (Path shardPath : stream) {
+ String fileName = shardPath.getFileName().toString();
+ if (Files.isDirectory(shardPath) && fileName.chars().allMatch(Character::isDigit)) {
+ Path indexDir = shardPath.resolve(ShardPath.INDEX_FOLDER_NAME);
+ Path statePath = shardPath.resolve(MetaDataStateFormat.STATE_DIR_NAME);
+ Path translogDir = shardPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
+ tryWriteTempFile(indexDir);
+ tryWriteTempFile(translogDir);
+ tryWriteTempFile(statePath);
+ tryWriteTempFile(shardPath);
+ }
+ }
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java
index 757a78c3b5f..1ccdb43cc45 100644
--- a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java
+++ b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java
@@ -269,7 +269,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
*/
// visible for testing
void asyncFetch(final ShardId shardId, final String[] nodesIds, final MetaData metaData) {
- IndexMetaData indexMetaData = metaData.index(shardId.getIndex());
+ IndexMetaData indexMetaData = metaData.getIndexSafe(shardId.getIndex());
logger.trace("{} fetching [{}] from {}", shardId, type, nodesIds);
action.list(shardId, indexMetaData, nodesIds, new ActionListener<BaseNodesResponse<T>>() {
@Override
diff --git a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java
index e2fcb56b1e1..b4d8eeae532 100644
--- a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java
+++ b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java
@@ -19,6 +19,7 @@
package org.elasticsearch.gateway;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.component.AbstractComponent;
@@ -26,12 +27,17 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.stream.Collectors;
import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap;
@@ -47,7 +53,7 @@ public class DanglingIndicesState extends AbstractComponent {
private final MetaStateService metaStateService;
private final LocalAllocateDangledIndices allocateDangledIndices;
- private final Map<String, IndexMetaData> danglingIndices = ConcurrentCollections.newConcurrentMap();
+ private final Map<Index, IndexMetaData> danglingIndices = ConcurrentCollections.newConcurrentMap();
@Inject
public DanglingIndicesState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService,
@@ -74,7 +80,7 @@ public class DanglingIndicesState extends AbstractComponent {
/**
* The current set of dangling indices.
*/
- Map<String, IndexMetaData> getDanglingIndices() {
+ Map<Index, IndexMetaData> getDanglingIndices() {
// This might be a good use case for CopyOnWriteHashMap
return unmodifiableMap(new HashMap<>(danglingIndices));
}
@@ -83,10 +89,16 @@ public class DanglingIndicesState extends AbstractComponent {
* Cleans dangling indices if they are already allocated on the provided meta data.
*/
void cleanupAllocatedDangledIndices(MetaData metaData) {
- for (String danglingIndex : danglingIndices.keySet()) {
- if (metaData.hasIndex(danglingIndex)) {
- logger.debug("[{}] no longer dangling (created), removing from dangling list", danglingIndex);
- danglingIndices.remove(danglingIndex);
+ for (Index index : danglingIndices.keySet()) {
+ final IndexMetaData indexMetaData = metaData.index(index);
+ if (indexMetaData != null && indexMetaData.getIndex().getName().equals(index.getName())) {
+ if (indexMetaData.getIndex().getUUID().equals(index.getUUID()) == false) {
+ logger.warn("[{}] can not be imported as a dangling index, as there is already another index " +
+ "with the same name but a different uuid. local index will be ignored (but not deleted)", index);
+ } else {
+ logger.debug("[{}] no longer dangling (created), removing from dangling list", index);
+ }
+ danglingIndices.remove(index);
}
}
}
@@ -104,36 +116,30 @@ public class DanglingIndicesState extends AbstractComponent {
* that have state on disk, but are not part of the provided meta data, or not detected
* as dangled already.
*/
- Map<String, IndexMetaData> findNewDanglingIndices(MetaData metaData) {
- final Set<String> indices;
+ Map<Index, IndexMetaData> findNewDanglingIndices(MetaData metaData) {
+ final Set<String> excludeIndexPathIds = new HashSet<>(metaData.indices().size() + danglingIndices.size());
+ for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
+ excludeIndexPathIds.add(cursor.value.getIndex().getUUID());
+ }
+ excludeIndexPathIds.addAll(danglingIndices.keySet().stream().map(Index::getUUID).collect(Collectors.toList()));
try {
- indices = nodeEnv.findAllIndices();
- } catch (Throwable e) {
+ final List<IndexMetaData> indexMetaDataList = metaStateService.loadIndicesStates(excludeIndexPathIds::contains);
+ Map<Index, IndexMetaData> newIndices = new HashMap<>(indexMetaDataList.size());
+ for (IndexMetaData indexMetaData : indexMetaDataList) {
+ if (metaData.hasIndex(indexMetaData.getIndex().getName())) {
+ logger.warn("[{}] can not be imported as a dangling index, as index with same name already exists in cluster metadata",
+ indexMetaData.getIndex());
+ } else {
+ logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state",
+ indexMetaData.getIndex());
+ newIndices.put(indexMetaData.getIndex(), indexMetaData);
+ }
+ }
+ return newIndices;
+ } catch (IOException e) {
logger.warn("failed to list dangling indices", e);
return emptyMap();
}
-
- Map<String, IndexMetaData> newIndices = new HashMap<>();
- for (String indexName : indices) {
- if (metaData.hasIndex(indexName) == false && danglingIndices.containsKey(indexName) == false) {
- try {
- IndexMetaData indexMetaData = metaStateService.loadIndexState(indexName);
- if (indexMetaData != null) {
- logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state", indexName);
- if (!indexMetaData.getIndex().getName().equals(indexName)) {
- logger.info("dangled index directory name is [{}], state name is [{}], renaming to directory name", indexName, indexMetaData.getIndex());
- indexMetaData = IndexMetaData.builder(indexMetaData).index(indexName).build();
- }
- newIndices.put(indexName, indexMetaData);
- } else {
- logger.debug("[{}] dangling index directory detected, but no state found", indexName);
- }
- } catch (Throwable t) {
- logger.warn("[{}] failed to load index state for detected dangled index", t, indexName);
- }
- }
- }
- return newIndices;
}
/**
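The rewritten `findNewDanglingIndices` stops comparing names and instead excludes folder names (index UUIDs) that are already accounted for, before loading anything from disk. The exclusion logic in isolation, as a hedged sketch — the inputs are assumed to be the UUID sets the diff derives from cluster metadata and from the current dangling map:

```java
import java.util.HashSet;
import java.util.Set;
import java.util.function.Predicate;

public class DanglingExclusion {
    // UUIDs already present in cluster metadata or already tracked as dangling
    // are skipped entirely; only truly unknown folders get their state loaded.
    static Predicate<String> excludeIndexPathIds(Set<String> inClusterState, Set<String> alreadyDangling) {
        Set<String> excluded = new HashSet<>(inClusterState);
        excluded.addAll(alreadyDangling);
        return excluded::contains; // handed to MetaStateService.loadIndicesStates(...)
    }
}
```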
diff --git a/core/src/main/java/org/elasticsearch/gateway/Gateway.java b/core/src/main/java/org/elasticsearch/gateway/Gateway.java
index fd3bd9a0b6d..f5d38112c4f 100644
--- a/core/src/main/java/org/elasticsearch/gateway/Gateway.java
+++ b/core/src/main/java/org/elasticsearch/gateway/Gateway.java
@@ -25,15 +25,16 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.cluster.ClusterChangedEvent;
-import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
import java.nio.file.Path;
import java.util.function.Supplier;
@@ -79,7 +80,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
}
}
- ObjectFloatHashMap<String> indices = new ObjectFloatHashMap<>();
+ ObjectFloatHashMap<Index> indices = new ObjectFloatHashMap<>();
MetaData electedGlobalState = null;
int found = 0;
for (TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState) {
@@ -93,7 +94,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
electedGlobalState = nodeState.metaData();
}
for (ObjectCursor<IndexMetaData> cursor : nodeState.metaData().indices().values()) {
- indices.addTo(cursor.value.getIndex().getName(), 1);
+ indices.addTo(cursor.value.getIndex(), 1);
}
}
if (found < requiredAllocation) {
@@ -107,7 +108,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
final Object[] keys = indices.keys;
for (int i = 0; i < keys.length; i++) {
if (keys[i] != null) {
- String index = (String) keys[i];
+ Index index = (Index) keys[i];
IndexMetaData electedIndexMetaData = null;
int indexMetaDataCount = 0;
for (TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState) {
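Switching the election map key from `String` to `Index` means two indices that share a name but differ in UUID are tallied separately. A hedged sketch of the counting idiom with the same hppc map the diff uses; `nodeReports` is an invented stand-in for the per-node metadata responses:

```java
import java.util.List;

import com.carrotsearch.hppc.ObjectFloatHashMap;
import org.elasticsearch.index.Index;

public class IndexVoteCount {
    static ObjectFloatHashMap<Index> tally(List<List<Index>> nodeReports) {
        ObjectFloatHashMap<Index> indices = new ObjectFloatHashMap<>();
        for (List<Index> report : nodeReports) {
            for (Index index : report) {
                // Index equality includes the UUID, so a re-created index with the
                // same name no longer pools votes with its dead predecessor.
                indices.addTo(index, 1);
            }
        }
        return indices;
    }
}
```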
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java
index acd650bc6f7..0059a0ef61b 100644
--- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java
+++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java
@@ -22,7 +22,6 @@ package org.elasticsearch.gateway;
import org.elasticsearch.action.support.nodes.BaseNodeResponse;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.cluster.ClusterChangedEvent;
-import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
@@ -31,6 +30,7 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
+import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasables;
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
index e90cb750cf5..950b4351e1d 100644
--- a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
+++ b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
@@ -34,7 +34,9 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.IndexFolderUpgrader;
import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
@@ -61,7 +63,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
@Nullable
private volatile MetaData previousMetaData;
- private volatile Set<String> previouslyWrittenIndices = emptySet();
+ private volatile Set<Index> previouslyWrittenIndices = emptySet();
@Inject
public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService,
@@ -85,6 +87,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
try {
ensureNoPre019State();
pre20Upgrade();
+ IndexFolderUpgrader.upgradeIndicesIfNeeded(settings, nodeEnv);
long startNS = System.nanoTime();
metaStateService.loadFullState();
logger.debug("took {} to load state", TimeValue.timeValueMillis(TimeValue.nsecToMSec(System.nanoTime() - startNS)));
@@ -102,7 +105,6 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
@Override
public void clusterChanged(ClusterChangedEvent event) {
- Set<String> relevantIndices = new HashSet<>();
final ClusterState state = event.state();
if (state.blocks().disableStatePersistence()) {
// reset the current metadata, we need to start fresh...
@@ -113,7 +115,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
MetaData newMetaData = state.metaData();
// we don't check if metaData changed, since we might be called several times and we need to check dangling...
-
+ Set<Index> relevantIndices = Collections.emptySet();
boolean success = true;
// write the state if this node is a master eligible node or if it is a data node and has shards allocated on it
if (state.nodes().localNode().masterNode() || state.nodes().localNode().dataNode()) {
@@ -126,14 +128,14 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
// persistence was disabled or the node was restarted), see getRelevantIndicesOnDataOnlyNode().
// we therefore have to check here if we have shards on disk and add their indices to the previouslyWrittenIndices list
if (isDataOnlyNode(state)) {
- Set<String> newPreviouslyWrittenIndices = new HashSet<>(previouslyWrittenIndices.size());
+ Set<Index> newPreviouslyWrittenIndices = new HashSet<>(previouslyWrittenIndices.size());
for (IndexMetaData indexMetaData : newMetaData) {
IndexMetaData indexMetaDataOnDisk = null;
if (indexMetaData.getState().equals(IndexMetaData.State.CLOSE)) {
- indexMetaDataOnDisk = metaStateService.loadIndexState(indexMetaData.getIndex().getName());
+ indexMetaDataOnDisk = metaStateService.loadIndexState(indexMetaData.getIndex());
}
if (indexMetaDataOnDisk != null) {
- newPreviouslyWrittenIndices.add(indexMetaDataOnDisk.getIndex().getName());
+ newPreviouslyWrittenIndices.add(indexMetaDataOnDisk.getIndex());
}
}
newPreviouslyWrittenIndices.addAll(previouslyWrittenIndices);
@@ -152,13 +154,13 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
}
}
- Iterable<IndexMetaWriteInfo> writeInfo;
relevantIndices = getRelevantIndices(event.state(), event.previousState(), previouslyWrittenIndices);
- writeInfo = resolveStatesToBeWritten(previouslyWrittenIndices, relevantIndices, previousMetaData, event.state().metaData());
+ final Iterable<IndexMetaWriteInfo> writeInfo = resolveStatesToBeWritten(previouslyWrittenIndices, relevantIndices, previousMetaData, event.state().metaData());
// check and write changes in indices
for (IndexMetaWriteInfo indexMetaWrite : writeInfo) {
try {
- metaStateService.writeIndex(indexMetaWrite.reason, indexMetaWrite.newMetaData, indexMetaWrite.previousMetaData);
+ metaStateService.writeIndex(indexMetaWrite.reason, indexMetaWrite.newMetaData);
} catch (Throwable e) {
success = false;
}
@@ -166,15 +168,14 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
}
danglingIndicesState.processDanglingIndices(newMetaData);
-
if (success) {
previousMetaData = newMetaData;
previouslyWrittenIndices = unmodifiableSet(relevantIndices);
}
}
- public static Set<String> getRelevantIndices(ClusterState state, ClusterState previousState, Set<String> previouslyWrittenIndices) {
- Set<String> relevantIndices;
+ public static Set<Index> getRelevantIndices(ClusterState state, ClusterState previousState, Set<Index> previouslyWrittenIndices) {
+ Set<Index> relevantIndices;
if (isDataOnlyNode(state)) {
relevantIndices = getRelevantIndicesOnDataOnlyNode(state, previousState, previouslyWrittenIndices);
} else if (state.nodes().localNode().masterNode() == true) {
@@ -202,7 +203,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
try (DirectoryStream<Path> stream = Files.newDirectoryStream(stateLocation)) {
for (Path stateFile : stream) {
if (logger.isTraceEnabled()) {
- logger.trace("[upgrade]: processing [" + stateFile.getFileName() + "]");
+ logger.trace("[upgrade]: processing [{}]", stateFile.getFileName());
}
final String name = stateFile.getFileName().toString();
if (name.startsWith("metadata-")) {
@@ -233,7 +234,8 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
// We successfully checked all indices for backward compatibility and found no non-upgradable indices, which
// means the upgrade can continue. Now it's safe to overwrite index metadata with the new version.
for (IndexMetaData indexMetaData : updateIndexMetaData) {
- metaStateService.writeIndex("upgrade", indexMetaData, null);
+ // since we still haven't upgraded the index folders, we write index state in the old folder
+ metaStateService.writeIndex("upgrade", indexMetaData, nodeEnv.resolveIndexFolder(indexMetaData.getIndex().getName()));
}
}
@@ -264,10 +266,10 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
* @param newMetaData The new metadata
* @return iterable over all indices states that should be written to disk
*/
- public static Iterable<GatewayMetaState.IndexMetaWriteInfo> resolveStatesToBeWritten(Set<String> previouslyWrittenIndices, Set<String> potentiallyUnwrittenIndices, MetaData previousMetaData, MetaData newMetaData) {
+ public static Iterable<GatewayMetaState.IndexMetaWriteInfo> resolveStatesToBeWritten(Set<Index> previouslyWrittenIndices, Set<Index> potentiallyUnwrittenIndices, MetaData previousMetaData, MetaData newMetaData) {
List<IndexMetaWriteInfo> indicesToWrite = new ArrayList<>();
- for (String index : potentiallyUnwrittenIndices) {
- IndexMetaData newIndexMetaData = newMetaData.index(index);
+ for (Index index : potentiallyUnwrittenIndices) {
+ IndexMetaData newIndexMetaData = newMetaData.getIndexSafe(index);
IndexMetaData previousIndexMetaData = previousMetaData == null ? null : previousMetaData.index(index);
String writeReason = null;
if (previouslyWrittenIndices.contains(index) == false || previousIndexMetaData == null) {
@@ -282,14 +284,14 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
return indicesToWrite;
}
- public static Set<String> getRelevantIndicesOnDataOnlyNode(ClusterState state, ClusterState previousState, Set<String> previouslyWrittenIndices) {
+ public static Set<Index> getRelevantIndicesOnDataOnlyNode(ClusterState state, ClusterState previousState, Set<Index> previouslyWrittenIndices) {
RoutingNode newRoutingNode = state.getRoutingNodes().node(state.nodes().localNodeId());
if (newRoutingNode == null) {
throw new IllegalStateException("cluster state does not contain this node - cannot write index meta state");
}
- Set<String> indices = new HashSet<>();
+ Set<Index> indices = new HashSet<>();
for (ShardRouting routing : newRoutingNode) {
- indices.add(routing.index().getName());
+ indices.add(routing.index());
}
// we have to check the meta data also: closed indices will not appear in the routing table, but we must still write the state if we have it written on disk previously
for (IndexMetaData indexMetaData : state.metaData()) {
@@ -300,19 +302,19 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
if (previousMetaData != null) {
isOrWasClosed = isOrWasClosed || previousMetaData.getState().equals(IndexMetaData.State.CLOSE);
}
- if (previouslyWrittenIndices.contains(indexMetaData.getIndex().getName()) && isOrWasClosed) {
- indices.add(indexMetaData.getIndex().getName());
+ if (previouslyWrittenIndices.contains(indexMetaData.getIndex()) && isOrWasClosed) {
+ indices.add(indexMetaData.getIndex());
}
}
return indices;
}
- public static Set<String> getRelevantIndicesForMasterEligibleNode(ClusterState state) {
- Set<String> relevantIndices;
+ public static Set<Index> getRelevantIndicesForMasterEligibleNode(ClusterState state) {
+ Set<Index> relevantIndices;
relevantIndices = new HashSet<>();
// we have to iterate over the metadata to make sure we also capture closed indices
for (IndexMetaData indexMetaData : state.metaData()) {
- relevantIndices.add(indexMetaData.getIndex().getName());
+ relevantIndices.add(indexMetaData.getIndex());
}
return relevantIndices;
}
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java
index 79f23c1b37e..16d67a84c4a 100644
--- a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java
+++ b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java
@@ -21,7 +21,6 @@ package org.elasticsearch.gateway;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.cluster.ClusterChangedEvent;
-import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
@@ -34,11 +33,14 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.rest.RestStatus;
@@ -51,20 +53,20 @@
*/
public class GatewayService extends AbstractLifecycleComponent<GatewayService> implements ClusterStateListener {
- public static final Setting<Integer> EXPECTED_NODES_SETTING = Setting.intSetting(
- "gateway.expected_nodes", -1, -1, false, Setting.Scope.CLUSTER);
- public static final Setting<Integer> EXPECTED_DATA_NODES_SETTING = Setting.intSetting(
- "gateway.expected_data_nodes", -1, -1, false, Setting.Scope.CLUSTER);
- public static final Setting<Integer> EXPECTED_MASTER_NODES_SETTING = Setting.intSetting(
- "gateway.expected_master_nodes", -1, -1, false, Setting.Scope.CLUSTER);
- public static final Setting<TimeValue> RECOVER_AFTER_TIME_SETTING = Setting.positiveTimeSetting(
- "gateway.recover_after_time", TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
- public static final Setting<Integer> RECOVER_AFTER_NODES_SETTING = Setting.intSetting(
- "gateway.recover_after_nodes", -1, -1, false, Setting.Scope.CLUSTER);
- public static final Setting<Integer> RECOVER_AFTER_DATA_NODES_SETTING = Setting.intSetting(
- "gateway.recover_after_data_nodes", -1, -1, false, Setting.Scope.CLUSTER);
- public static final Setting<Integer> RECOVER_AFTER_MASTER_NODES_SETTING = Setting.intSetting(
- "gateway.recover_after_master_nodes", 0, 0, false, Setting.Scope.CLUSTER);
+ public static final Setting<Integer> EXPECTED_NODES_SETTING =
+ Setting.intSetting("gateway.expected_nodes", -1, -1, Property.NodeScope);
+ public static final Setting<Integer> EXPECTED_DATA_NODES_SETTING =
+ Setting.intSetting("gateway.expected_data_nodes", -1, -1, Property.NodeScope);
+ public static final Setting<Integer> EXPECTED_MASTER_NODES_SETTING =
+ Setting.intSetting("gateway.expected_master_nodes", -1, -1, Property.NodeScope);
+ public static final Setting<TimeValue> RECOVER_AFTER_TIME_SETTING =
+ Setting.positiveTimeSetting("gateway.recover_after_time", TimeValue.timeValueMillis(0), Property.NodeScope);
+ public static final Setting<Integer> RECOVER_AFTER_NODES_SETTING =
+ Setting.intSetting("gateway.recover_after_nodes", -1, -1, Property.NodeScope);
+ public static final Setting<Integer> RECOVER_AFTER_DATA_NODES_SETTING =
+ Setting.intSetting("gateway.recover_after_data_nodes", -1, -1, Property.NodeScope);
+ public static final Setting<Integer> RECOVER_AFTER_MASTER_NODES_SETTING =
+ Setting.intSetting("gateway.recover_after_master_nodes", 0, 0, Property.NodeScope);
public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true,
RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
@@ -160,11 +162,14 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
if (state.nodes().masterNodeId() == null) {
logger.debug("not recovering from gateway, no master elected yet");
} else if (recoverAfterNodes != -1 && (nodes.masterAndDataNodes().size()) < recoverAfterNodes) {
- logger.debug("not recovering from gateway, nodes_size (data+master) [" + nodes.masterAndDataNodes().size() + "] < recover_after_nodes [" + recoverAfterNodes + "]");
+ logger.debug("not recovering from gateway, nodes_size (data+master) [{}] < recover_after_nodes [{}]",
+ nodes.masterAndDataNodes().size(), recoverAfterNodes);
} else if (recoverAfterDataNodes != -1 && nodes.dataNodes().size() < recoverAfterDataNodes) {
- logger.debug("not recovering from gateway, nodes_size (data) [" + nodes.dataNodes().size() + "] < recover_after_data_nodes [" + recoverAfterDataNodes + "]");
+ logger.debug("not recovering from gateway, nodes_size (data) [{}] < recover_after_data_nodes [{}]",
+ nodes.dataNodes().size(), recoverAfterDataNodes);
} else if (recoverAfterMasterNodes != -1 && nodes.masterNodes().size() < recoverAfterMasterNodes) {
- logger.debug("not recovering from gateway, nodes_size (master) [" + nodes.masterNodes().size() + "] < recover_after_master_nodes [" + recoverAfterMasterNodes + "]");
+ logger.debug("not recovering from gateway, nodes_size (master) [{}] < recover_after_master_nodes [{}]",
+ nodes.masterNodes().size(), recoverAfterMasterNodes);
} else {
boolean enforceRecoverAfterTime;
String reason;
@@ -206,7 +211,20 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
}
} else {
if (recovered.compareAndSet(false, true)) {
- threadPool.generic().execute(() -> gateway.performStateRecovery(recoveryListener));
+ threadPool.generic().execute(new AbstractRunnable() {
+ @Override
+ public void onFailure(Throwable t) {
+ logger.warn("Recovery failed", t);
+ // we reset `recovered` in the listener don't reset it here otherwise there might be a race
+ // that resets it to false while a new recover is already running?
+ recoveryListener.onFailure("state recovery failed: " + t.getMessage());
+ }
+
+ @Override
+ protected void doRun() throws Exception {
+ gateway.performStateRecovery(recoveryListener);
+ }
+ });
}
}
}
a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 9ef09753c43..0edfb563174 100644 --- a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -25,58 +25,26 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Predicate; /** * Handles writing and loading both {@link MetaData} and {@link IndexMetaData} */ public class MetaStateService extends AbstractComponent { - static final String FORMAT_SETTING = "gateway.format"; - - static final String GLOBAL_STATE_FILE_PREFIX = "global-"; - private static final String INDEX_STATE_FILE_PREFIX = "state-"; - private final NodeEnvironment nodeEnv; - private final XContentType format; - private final ToXContent.Params formatParams; - private final ToXContent.Params gatewayModeFormatParams; - private final MetaDataStateFormat indexStateFormat; - private final MetaDataStateFormat globalStateFormat; - @Inject public MetaStateService(Settings settings, NodeEnvironment nodeEnv) { super(settings); this.nodeEnv = nodeEnv; - this.format = XContentType.fromMediaTypeOrFormat(settings.get(FORMAT_SETTING, "smile")); - if (this.format == XContentType.SMILE) { - Map params = new HashMap<>(); - params.put("binary", "true"); - formatParams = new ToXContent.MapParams(params); - Map gatewayModeParams = new HashMap<>(); - gatewayModeParams.put("binary", "true"); - gatewayModeParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY); - gatewayModeFormatParams = new ToXContent.MapParams(gatewayModeParams); - } else { - formatParams = ToXContent.EMPTY_PARAMS; - Map gatewayModeParams = new HashMap<>(); - gatewayModeParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY); - gatewayModeFormatParams = new ToXContent.MapParams(gatewayModeParams); - } - indexStateFormat = indexStateFormat(format, formatParams); - globalStateFormat = globalStateFormat(format, gatewayModeFormatParams); - } /** @@ -91,14 +59,12 @@ public class MetaStateService extends AbstractComponent { } else { metaDataBuilder = MetaData.builder(); } - - final Set indices = nodeEnv.findAllIndices(); - for (String index : indices) { - IndexMetaData indexMetaData = loadIndexState(index); - if (indexMetaData == null) { - logger.debug("[{}] failed to find metadata for existing index location", index); - } else { + for (String indexFolderName : nodeEnv.availableIndexFolders()) { + IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, nodeEnv.resolveIndexFolder(indexFolderName)); + if (indexMetaData != null) { metaDataBuilder.put(indexMetaData, false); + } else { + logger.debug("[{}] failed to find metadata for existing index location", indexFolderName); } } return metaDataBuilder.build(); @@ -108,15 +74,40 @@ public class 
MetaStateService extends AbstractComponent { * Loads the index state for the provided index name, returning null if it doesn't exist. */ @Nullable - IndexMetaData loadIndexState(String index) throws IOException { - return indexStateFormat.loadLatestState(logger, nodeEnv.indexPaths(index)); + IndexMetaData loadIndexState(Index index) throws IOException { + return IndexMetaData.FORMAT.loadLatestState(logger, nodeEnv.indexPaths(index)); + } + + /** + * Loads all indices states available on disk + */ + List<IndexMetaData> loadIndicesStates(Predicate<String> excludeIndexPathIdsPredicate) throws IOException { + List<IndexMetaData> indexMetaDataList = new ArrayList<>(); + for (String indexFolderName : nodeEnv.availableIndexFolders()) { + if (excludeIndexPathIdsPredicate.test(indexFolderName)) { + continue; + } + IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, + nodeEnv.resolveIndexFolder(indexFolderName)); + if (indexMetaData != null) { + final String indexPathId = indexMetaData.getIndex().getUUID(); + if (indexFolderName.equals(indexPathId)) { + indexMetaDataList.add(indexMetaData); + } else { + throw new IllegalStateException("[" + indexFolderName + "] invalid index folder name, rename to [" + indexPathId + "]"); + } + } else { + logger.debug("[{}] failed to find metadata for existing index location", indexFolderName); + } + } + return indexMetaDataList; } /** * Loads the global state, *without* index state, see {@link #loadFullState()} for that. */ MetaData loadGlobalState() throws IOException { - MetaData globalState = globalStateFormat.loadLatestState(logger, nodeEnv.nodeDataPaths()); + MetaData globalState = MetaData.FORMAT.loadLatestState(logger, nodeEnv.nodeDataPaths()); // ES 2.0 now requires units for all time and byte-sized settings, so we add the default unit if it's missing // TODO: can we somehow only do this for pre-2.0 cluster state? if (globalState != null) {
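loadIndicesStates pairs naturally with a caller that already knows which index folders are accounted for. A minimal sketch of a hypothetical caller (same-package access assumed, since the method is package-private; `knownUUIDs` would come from the current cluster state):

    import java.io.IOException;
    import java.util.List;
    import java.util.Set;
    import org.elasticsearch.cluster.metadata.IndexMetaData;

    class DanglingIndicesSketch {
        // After this change an index folder is named by the index UUID, so the
        // exclusion predicate can be a simple UUID-set lookup.
        static List<IndexMetaData> danglingCandidates(MetaStateService metaStateService,
                                                      Set<String> knownUUIDs) throws IOException {
            return metaStateService.loadIndicesStates(knownUUIDs::contains);
        }
    }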
@@ -129,13 +120,22 @@ public class MetaStateService extends AbstractComponent { /** * Writes the index state. */ - void writeIndex(String reason, IndexMetaData indexMetaData, @Nullable IndexMetaData previousIndexMetaData) throws Exception { - logger.trace("[{}] writing state, reason [{}]", indexMetaData.getIndex(), reason); + void writeIndex(String reason, IndexMetaData indexMetaData) throws IOException { + writeIndex(reason, indexMetaData, nodeEnv.indexPaths(indexMetaData.getIndex())); + } + + /** + * Writes the index state to the given locations; use {@link #writeIndex(String, IndexMetaData)} + * to write the index state to the index's default paths + */ + void writeIndex(String reason, IndexMetaData indexMetaData, Path[] locations) throws IOException { + final Index index = indexMetaData.getIndex(); + logger.trace("[{}] writing state, reason [{}]", index, reason); try { - indexStateFormat.write(indexMetaData, indexMetaData.getVersion(), nodeEnv.indexPaths(indexMetaData.getIndex().getName())); + IndexMetaData.FORMAT.write(indexMetaData, indexMetaData.getVersion(), locations); } catch (Throwable ex) { - logger.warn("[{}]: failed to write index state", ex, indexMetaData.getIndex()); - throw new IOException("failed to write state for [" + indexMetaData.getIndex() + "]", ex); + logger.warn("[{}]: failed to write index state", ex, index); + throw new IOException("failed to write state for [" + index + "]", ex); } } @@ -145,45 +145,10 @@ public class MetaStateService extends AbstractComponent { void writeGlobalState(String reason, MetaData metaData) throws Exception { logger.trace("[_global] writing state, reason [{}]", reason); try { - globalStateFormat.write(metaData, metaData.version(), nodeEnv.nodeDataPaths()); + MetaData.FORMAT.write(metaData, metaData.version(), nodeEnv.nodeDataPaths()); } catch (Throwable ex) { logger.warn("[_global]: failed to write global state", ex); throw new IOException("failed to write global state", ex); } } - - /** - * Returns a StateFormat that can read and write {@link MetaData} - */ - static MetaDataStateFormat<MetaData> globalStateFormat(XContentType format, final ToXContent.Params formatParams) { - return new MetaDataStateFormat<MetaData>(format, GLOBAL_STATE_FILE_PREFIX) { - - @Override - public void toXContent(XContentBuilder builder, MetaData state) throws IOException { - MetaData.Builder.toXContent(state, builder, formatParams); - } - - @Override - public MetaData fromXContent(XContentParser parser) throws IOException { - return MetaData.Builder.fromXContent(parser); - } - }; - } - - /** - * Returns a StateFormat that can read and write {@link IndexMetaData} - */ - static MetaDataStateFormat<IndexMetaData> indexStateFormat(XContentType format, final ToXContent.Params formatParams) { - return new MetaDataStateFormat<IndexMetaData>(format, INDEX_STATE_FILE_PREFIX) { - - @Override - public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException { - IndexMetaData.Builder.toXContent(state, builder, formatParams); } - - @Override - public IndexMetaData fromXContent(XContentParser parser) throws IOException { - return IndexMetaData.Builder.fromXContent(parser); - } - }; - } } diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index ed61aa2c1fd..a456da0779d 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java
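The pair of writeIndex overloads above separates the "where" from the "what": the two-argument form resolves the index's default data paths, while the three-argument form takes explicit locations. A sketch of the calling pattern (hypothetical reasons and paths; same-package access assumed):

    import java.io.IOException;
    import java.nio.file.Path;
    import org.elasticsearch.cluster.metadata.IndexMetaData;

    class WriteIndexSketch {
        static void persist(MetaStateService metaStateService, IndexMetaData indexMetaData,
                            Path[] explicitLocations) throws IOException {
            // default: resolves nodeEnv.indexPaths(indexMetaData.getIndex())
            metaStateService.writeIndex("dangling index import", indexMetaData);
            // explicit: e.g. while an index folder is being relocated
            metaStateService.writeIndex("folder relocation", indexMetaData, explicitLocations);
        }
    }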
@@ -31,6 +31,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import org.elasticsearch.index.shard.ShardStateMetaData; @@ -67,9 +68,13 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { } }; - public static final Setting<String> NODE_INITIAL_SHARDS_SETTING = new Setting<>("gateway.initial_shards", (settings) -> settings.get("gateway.local.initial_shards", "quorum"), INITIAL_SHARDS_PARSER, true, Setting.Scope.CLUSTER); + public static final Setting<String> NODE_INITIAL_SHARDS_SETTING = + new Setting<>("gateway.initial_shards", (settings) -> settings.get("gateway.local.initial_shards", "quorum"), INITIAL_SHARDS_PARSER, + Property.Dynamic, Property.NodeScope); @Deprecated - public static final Setting<String> INDEX_RECOVERY_INITIAL_SHARDS_SETTING = new Setting<>("index.recovery.initial_shards", (settings) -> NODE_INITIAL_SHARDS_SETTING.get(settings) , INITIAL_SHARDS_PARSER, true, Setting.Scope.INDEX); + public static final Setting<String> INDEX_RECOVERY_INITIAL_SHARDS_SETTING = + new Setting<>("index.recovery.initial_shards", (settings) -> NODE_INITIAL_SHARDS_SETTING.get(settings) , INITIAL_SHARDS_PARSER, + Property.Dynamic, Property.IndexScope); public PrimaryShardAllocator(Settings settings) { super(settings); @@ -89,7 +94,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { continue; } - final IndexMetaData indexMetaData = metaData.index(shard.getIndexName()); + final IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index()); // don't go wild here and create a new IndexSetting object for every shard this could cause a lot of garbage // on cluster restart if we allocate a boat load of shards if (shard.allocatedPostIndexCreate(indexMetaData) == false) { diff --git a/core/src/main/java/org/elasticsearch/gateway/PriorityComparator.java b/core/src/main/java/org/elasticsearch/gateway/PriorityComparator.java index 04f438c70fe..1d24baf561a 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PriorityComparator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PriorityComparator.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import java.util.Comparator; @@ -42,8 +43,8 @@ public abstract class PriorityComparator implements Comparator<ShardRouting> { final String o2Index = o2.getIndexName(); int cmp = 0; if (o1Index.equals(o2Index) == false) { - final Settings settingsO1 = getIndexSettings(o1Index); - final Settings settingsO2 = getIndexSettings(o2Index); + final Settings settingsO1 = getIndexSettings(o1.index()); + final Settings settingsO2 = getIndexSettings(o2.index()); cmp = Long.compare(priority(settingsO2), priority(settingsO1)); if (cmp == 0) { cmp = Long.compare(timeCreated(settingsO2), timeCreated(settingsO1)); @@ -63,7 +64,7 @@ public abstract class PriorityComparator implements Comparator<ShardRouting> { return settings.getAsLong(IndexMetaData.SETTING_CREATION_DATE, -1L); } - protected abstract Settings getIndexSettings(String index); + protected abstract Settings getIndexSettings(Index index); /** * Returns a PriorityComparator that uses the RoutingAllocation index metadata to access the index setting per index. @@ -71,8 +72,8 @@ public static PriorityComparator getAllocationComparator(final RoutingAllocation allocation) { return new PriorityComparator() { @Override - protected Settings getIndexSettings(String index) { - IndexMetaData indexMetaData = allocation.metaData().index(index); + protected Settings getIndexSettings(Index index) { + IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(index); return indexMetaData.getSettings(); } }; diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index e2b6f0d27ed..74511639d47 100644 --- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
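The PriorityComparator change above keeps the ordering intact while switching the lookup key: shards are ordered by their index's priority, then by creation date, newest first. A small illustration (assuming Settings.builder() and the upstream keys "index.priority" / "index.creation_date" behind IndexMetaData.SETTING_PRIORITY and SETTING_CREATION_DATE; the concrete values are made up):

    import org.elasticsearch.common.settings.Settings;

    class PrioritySketch {
        static boolean newerWinsOnTie() {
            Settings idx1 = Settings.builder().put("index.priority", 5).put("index.creation_date", 1000L).build();
            Settings idx2 = Settings.builder().put("index.priority", 5).put("index.creation_date", 2000L).build();
            int cmp = Long.compare(idx2.getAsLong("index.priority", 1L),
                                   idx1.getAsLong("index.priority", 1L)); // higher priority first
            if (cmp == 0) {
                // tie-break: newer creation date first, mirroring the comparator above
                cmp = Long.compare(idx2.getAsLong("index.creation_date", -1L),
                                   idx1.getAsLong("index.creation_date", -1L));
            }
            return cmp > 0; // idx2 sorts before idx1
        }
    }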
@@ -74,7 +74,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { } // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... - IndexMetaData indexMetaData = metaData.index(shard.getIndexName()); + IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index()); if (shard.allocatedPostIndexCreate(indexMetaData) == false) { continue; } @@ -104,6 +104,8 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { && matchingNodes.isNodeMatchBySyncID(nodeWithHighestMatch) == true) { // we found a better match that has a full sync id match, the existing allocation is not fully synced // so we found a better one, cancel this one + logger.debug("cancelling allocation of replica on [{}], sync id match found on node [{}]", + currentNode, nodeWithHighestMatch); it.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.REALLOCATED_REPLICA, "existing allocation of replica to [" + currentNode + "] cancelled, sync id match found on node [" + nodeWithHighestMatch + "]", null, allocation.getCurrentNanoTime(), System.currentTimeMillis())); @@ -127,7 +129,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { } // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... - IndexMetaData indexMetaData = metaData.index(shard.getIndexName()); + IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index()); if (shard.allocatedPostIndexCreate(indexMetaData) == false) { continue; } diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index fb174f4bd45..0fd1fd35809 100644 --- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -29,10 +29,10 @@ import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index 03f8dc81703..7a090208818 100644 --- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -29,11 +29,11 @@ import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -125,7 +125,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction logger.trace("{} loading local shard state info", shardId); ShardStateMetaData shardStateMetaData = ShardStateMetaData.FORMAT.loadLatestState(logger, nodeEnv.availableShardPaths(request.shardId)); if (shardStateMetaData != null) { - final IndexMetaData metaData = clusterService.state().metaData().index(shardId.getIndexName()); // it's a mystery why this is sometimes null + final IndexMetaData metaData = clusterService.state().metaData().index(shardId.getIndex()); // it's a mystery why this is sometimes null if (metaData != null) { ShardPath shardPath = null; try { diff --git a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java index 6c91df079b9..48af1c83965 100644 --- a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java +++ b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
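The next file converts the HTTP transport settings wholesale. A sketch of how these node-scoped settings are consumed once declared (assuming Settings.builder(); the values are illustrative, and the two constants referenced are declared in the hunks that follow):

    import java.util.List;
    import org.elasticsearch.common.settings.Settings;

    class HttpSettingsSketch {
        static void read() {
            Settings settings = Settings.builder()
                .put("http.cors.enabled", true)
                .build();
            boolean corsEnabled = HttpTransportSettings.SETTING_CORS_ENABLED.get(settings); // true
            // list settings such as http.host fall back to the empty list when unset
            List<String> hosts = HttpTransportSettings.SETTING_HTTP_HOST.get(settings);
        }
    }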
@@ -20,42 +20,64 @@ package org.elasticsearch.http; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.transport.PortsRange; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import java.util.List; +import java.util.function.Function; import static java.util.Collections.emptyList; import static org.elasticsearch.common.settings.Setting.listSetting; public final class HttpTransportSettings { - public static final Setting<Boolean> SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER); - public static final Setting<String> SETTING_CORS_ALLOW_ORIGIN = new Setting<String>("http.cors.allow-origin", "", (value) -> value, false, Scope.CLUSTER); - public static final Setting<Integer> SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER); - public static final Setting<String> SETTING_CORS_ALLOW_METHODS = new Setting<String>("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE", (value) -> value, false, Scope.CLUSTER); - public static final Setting<String> SETTING_CORS_ALLOW_HEADERS = new Setting<String>("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length", (value) -> value, false, Scope.CLUSTER); - public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER); - public static final Setting<Boolean> SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER); - public static final Setting<Integer> SETTING_PIPELINING_MAX_EVENTS = Setting.intSetting("http.pipelining.max_events", 10000, false, Scope.CLUSTER); - public static final Setting<Boolean> SETTING_HTTP_COMPRESSION = Setting.boolSetting("http.compression", false, false, Scope.CLUSTER); - public static final Setting<Integer> SETTING_HTTP_COMPRESSION_LEVEL = Setting.intSetting("http.compression_level", 6, false, Scope.CLUSTER); - public static final Setting<List<String>> SETTING_HTTP_HOST = listSetting("http.host", emptyList(), s -> s, false, Scope.CLUSTER); - public static final Setting<List<String>> SETTING_HTTP_PUBLISH_HOST = listSetting("http.publish_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER); - public static final Setting<List<String>> SETTING_HTTP_BIND_HOST = listSetting("http.bind_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER); + public static final Setting<Boolean> SETTING_CORS_ENABLED = + Setting.boolSetting("http.cors.enabled", false, Property.NodeScope); + public static final Setting<String> SETTING_CORS_ALLOW_ORIGIN = + new Setting<String>("http.cors.allow-origin", "", (value) -> value, Property.NodeScope); + public static final Setting<Integer> SETTING_CORS_MAX_AGE = + Setting.intSetting("http.cors.max-age", 1728000, Property.NodeScope); + public static final Setting<String> SETTING_CORS_ALLOW_METHODS = + new Setting<String>("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE", (value) -> value, Property.NodeScope); + public static final Setting<String> SETTING_CORS_ALLOW_HEADERS = + new Setting<String>("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length", (value) -> value, Property.NodeScope); + public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS = + Setting.boolSetting("http.cors.allow-credentials", false, Property.NodeScope); + public static final Setting<Boolean> SETTING_PIPELINING = + Setting.boolSetting("http.pipelining", true, Property.NodeScope); + public static final Setting<Integer> SETTING_PIPELINING_MAX_EVENTS = + Setting.intSetting("http.pipelining.max_events", 10000, Property.NodeScope); + public static final Setting<Boolean> SETTING_HTTP_COMPRESSION = + Setting.boolSetting("http.compression", false, Property.NodeScope); + public static final Setting<Integer> SETTING_HTTP_COMPRESSION_LEVEL = + Setting.intSetting("http.compression_level", 6, Property.NodeScope); + public static final Setting<List<String>> SETTING_HTTP_HOST = + listSetting("http.host", emptyList(), Function.identity(), Property.NodeScope); + public static final Setting<List<String>> SETTING_HTTP_PUBLISH_HOST = + listSetting("http.publish_host", SETTING_HTTP_HOST, Function.identity(), Property.NodeScope); + public static final Setting<List<String>> SETTING_HTTP_BIND_HOST = + listSetting("http.bind_host", SETTING_HTTP_HOST, Function.identity(), Property.NodeScope); - public static final Setting<PortsRange> SETTING_HTTP_PORT = new Setting<PortsRange>("http.port", "9200-9300", PortsRange::new, false, Scope.CLUSTER); - public static final Setting<Integer> SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", -1, -1, false, Scope.CLUSTER); - public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER); - public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), false, Scope.CLUSTER) ; - public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ; - public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ; - public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), false, Scope.CLUSTER) ; + public static final Setting<PortsRange> SETTING_HTTP_PORT = + new Setting<PortsRange>("http.port", "9200-9300", PortsRange::new, Property.NodeScope); + public static final Setting<Integer> SETTING_HTTP_PUBLISH_PORT = + Setting.intSetting("http.publish_port", -1, -1, Property.NodeScope); + public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = + Setting.boolSetting("http.detailed_errors.enabled", true, Property.NodeScope); + public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH = + Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope); + public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE = + Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope); + public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_HEADER_SIZE = + Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope); + public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = + Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), Property.NodeScope); // don't reset cookies by default, since I don't think we really need to // note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still, currently, we don't need cookies - public static final Setting<Boolean> SETTING_HTTP_RESET_COOKIES = Setting.boolSetting("http.reset_cookies", false, false, Scope.CLUSTER); + public static final Setting<Boolean> SETTING_HTTP_RESET_COOKIES = + Setting.boolSetting("http.reset_cookies", false, Property.NodeScope); private HttpTransportSettings() { } diff --git a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java 
b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java index e64c6401f71..332380d9fb1 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java +++ b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.netty.OpenChannelsHandler; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; @@ -118,33 +119,32 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpServerTransport> implements HttpServerTransport { public static Setting<ByteSizeValue> SETTING_HTTP_NETTY_MAX_CUMULATION_BUFFER_CAPACITY = - Setting.byteSizeSetting("http.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); + Setting.byteSizeSetting("http.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), + Property.NodeScope); public static Setting<Integer> SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = - Setting.intSetting("http.netty.max_composite_buffer_components", -1, false, Setting.Scope.CLUSTER); + Setting.intSetting("http.netty.max_composite_buffer_components", -1, Property.NodeScope); public static final Setting<Integer> SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count", (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), - (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), false, Setting.Scope.CLUSTER); + (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), Property.NodeScope); - public static final Setting<Boolean> SETTING_HTTP_TCP_NO_DELAY = boolSetting("http.tcp_no_delay", NetworkService.TcpSettings - .TCP_NO_DELAY, false, - Setting.Scope.CLUSTER); - public static final Setting<Boolean> SETTING_HTTP_TCP_KEEP_ALIVE = boolSetting("http.tcp.keep_alive", NetworkService.TcpSettings - .TCP_KEEP_ALIVE, false, - Setting.Scope.CLUSTER); - public static final Setting<Boolean> SETTING_HTTP_TCP_BLOCKING_SERVER = boolSetting("http.tcp.blocking_server", NetworkService - .TcpSettings.TCP_BLOCKING_SERVER, - false, Setting.Scope.CLUSTER); - public static final Setting<Boolean> SETTING_HTTP_TCP_REUSE_ADDRESS = boolSetting("http.tcp.reuse_address", NetworkService - .TcpSettings.TCP_REUSE_ADDRESS, - false, Setting.Scope.CLUSTER); + public static final Setting<Boolean> SETTING_HTTP_TCP_NO_DELAY = + boolSetting("http.tcp_no_delay", NetworkService.TcpSettings.TCP_NO_DELAY, Property.NodeScope); + public static final Setting<Boolean> SETTING_HTTP_TCP_KEEP_ALIVE = + boolSetting("http.tcp.keep_alive", NetworkService.TcpSettings.TCP_KEEP_ALIVE, Property.NodeScope); + public static final Setting<Boolean> SETTING_HTTP_TCP_BLOCKING_SERVER = + boolSetting("http.tcp.blocking_server", NetworkService.TcpSettings.TCP_BLOCKING_SERVER, Property.NodeScope); + public static final Setting<Boolean> SETTING_HTTP_TCP_REUSE_ADDRESS = + boolSetting("http.tcp.reuse_address", NetworkService.TcpSettings.TCP_REUSE_ADDRESS, Property.NodeScope); - public static final Setting<ByteSizeValue> SETTING_HTTP_TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("http.tcp.send_buffer_size", - NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting<ByteSizeValue> SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("http.tcp" + - ".receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( - "transport.netty.receive_predictor_size", + public static final Setting<ByteSizeValue> SETTING_HTTP_TCP_SEND_BUFFER_SIZE = + Setting.byteSizeSetting("http.tcp.send_buffer_size", NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, + Property.NodeScope); + public static final Setting<ByteSizeValue> SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE = + Setting.byteSizeSetting("http.tcp.receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, + Property.NodeScope); + public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = + Setting.byteSizeSetting("transport.netty.receive_predictor_size", settings -> { long defaultReceiverPredictor = 512 * 1024; if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) { @@ -154,13 +154,11 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpServerTransport> implements HttpServerTransport { - public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting("http.netty" + - ".receive_predictor_min", - SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting("http.netty" + - ".receive_predictor_max", - SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER); + }, Property.NodeScope); + public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN = + byteSizeSetting("http.netty.receive_predictor_min", SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); + public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX = + byteSizeSetting("http.netty.receive_predictor_max", SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); protected final NetworkService networkService; @@ -262,7 +260,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpServerTransport> implements HttpServerTransport { if (maxContentLength.bytes() > Integer.MAX_VALUE) { - logger.warn("maxContentLength[" + maxContentLength + "] set to high value, resetting it to [100mb]"); + logger.warn("maxContentLength[{}] set to a high value, resetting it to [100mb]", maxContentLength); maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB); } this.maxContentLength = maxContentLength; diff --git a/core/src/main/java/org/elasticsearch/index/Index.java b/core/src/main/java/org/elasticsearch/index/Index.java index 80bf3c31b44..3ffe13e38b1 100644 --- a/core/src/main/java/org/elasticsearch/index/Index.java +++ b/core/src/main/java/org/elasticsearch/index/Index.java @@ -19,6 +19,7 @@ package org.elasticsearch.index; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -30,7 +31,7 @@ import java.io.IOException; */ public class Index implements Writeable { - private final static Index PROTO = new Index("", ""); + public static final Index[] EMPTY_ARRAY = new Index[0]; private final String name; private final String uuid; @@ -40,6 +41,12 @@ public class Index implements Writeable { this.uuid = uuid.intern(); } + public Index(StreamInput in) throws IOException { + this.name = in.readString(); + this.uuid = in.readString(); + } + + public String getName() { return this.name; } @@ -50,7 +57,14 @@ public class Index implements Writeable { @Override public String toString() { - return "[" + name + "]"; + /* + * If we have a uuid we put it in the toString so it'll show up in logs which is useful as more and more things use the uuid rather + * than the name as the lookup key for the 
index. + */ + if (ClusterState.UNKNOWN_UUID.equals(uuid)) { + return "[" + name + "]"; + } + return "[" + name + "/" + uuid + "]"; } @Override @@ -72,13 +86,9 @@ public class Index implements Writeable { return result; } - public static Index readIndex(StreamInput in) throws IOException { - return PROTO.readFrom(in); - } - @Override public Index readFrom(StreamInput in) throws IOException { - return new Index(in.readString(), in.readString()); + return new Index(in); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/IndexModule.java b/core/src/main/java/org/elasticsearch/index/IndexModule.java index eabc0951e7f..48230e6ec1e 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/core/src/main/java/org/elasticsearch/index/IndexModule.java @@ -22,6 +22,7 @@ package org.elasticsearch.index; import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.analysis.AnalysisRegistry; @@ -65,13 +66,17 @@ import java.util.function.Function; */ public final class IndexModule { - public static final Setting<String> INDEX_STORE_TYPE_SETTING = new Setting<>("index.store.type", "", Function.identity(), false, Setting.Scope.INDEX); + public static final Setting<String> INDEX_STORE_TYPE_SETTING = + new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope); public static final String SIMILARITY_SETTINGS_PREFIX = "index.similarity"; public static final String INDEX_QUERY_CACHE = "index"; public static final String NONE_QUERY_CACHE = "none"; - public static final Setting<String> INDEX_QUERY_CACHE_TYPE_SETTING = new Setting<>("index.queries.cache.type", INDEX_QUERY_CACHE, Function.identity(), false, Setting.Scope.INDEX); + public static final Setting<String> INDEX_QUERY_CACHE_TYPE_SETTING = + new Setting<>("index.queries.cache.type", INDEX_QUERY_CACHE, Function.identity(), Property.IndexScope); + // for test purposes only - public static final Setting<Boolean> INDEX_QUERY_CACHE_EVERYTHING_SETTING = Setting.boolSetting("index.queries.cache.everything", false, false, Setting.Scope.INDEX); + public static final Setting<Boolean> INDEX_QUERY_CACHE_EVERYTHING_SETTING = + Setting.boolSetting("index.queries.cache.everything", false, Property.IndexScope); private final IndexSettings indexSettings; private final IndexStoreConfig indexStoreConfig; private final AnalysisRegistry analysisRegistry; @@ -83,7 +88,7 @@ public final class IndexModule { private final Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities = new HashMap<>(); private final Map<String, BiFunction<IndexSettings, IndexStoreConfig, IndexStore>> storeTypes = new HashMap<>(); private final Map<String, BiFunction<IndexSettings, IndicesQueryCache, QueryCache>> queryCaches = new HashMap<>(); - + private final SetOnce<String> forceQueryCacheType = new SetOnce<>(); public IndexModule(IndexSettings indexSettings, IndexStoreConfig indexStoreConfig, AnalysisRegistry analysisRegistry) { this.indexStoreConfig = indexStoreConfig; @@ -261,11 +266,23 @@ public final class IndexModule { } indexSettings.getScopedSettings().addSettingsUpdateConsumer(IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING, store::setType); indexSettings.getScopedSettings().addSettingsUpdateConsumer(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, store::setMaxRate); - final String queryCacheType = indexSettings.getValue(INDEX_QUERY_CACHE_TYPE_SETTING); + final String queryCacheType = forceQueryCacheType.get() != null ? forceQueryCacheType.get() : indexSettings.getValue(INDEX_QUERY_CACHE_TYPE_SETTING); final BiFunction<IndexSettings, IndicesQueryCache, QueryCache> queryCacheProvider = queryCaches.get(queryCacheType); final QueryCache queryCache = queryCacheProvider.apply(indexSettings, indicesQueryCache); return new IndexService(indexSettings, environment, new SimilarityService(indexSettings, similarities), shardStoreDeleter, analysisRegistry, engineFactory.get(), servicesProvider, queryCache, store, eventListener, searcherWrapperFactory, mapperRegistry, indicesFieldDataCache, listeners); } + /** + * Forces a certain query cache type. If set, the given cache type + * overrides the default as well as the type set on the index level. + * NOTE: this can only be set once + * + * @see #INDEX_QUERY_CACHE_TYPE_SETTING + */ + public void forceQueryCacheType(String type) { + this.forceQueryCacheType.set(type); + } + } diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index 6b4a1851ab5..bb73e212a77 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -61,6 +61,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexEventListener; @@ -140,8 +141,9 @@ public final class IndexService extends AbstractIndexComponent implements IndexC this.indexStore = indexStore; indexFieldData.setListener(new FieldDataCacheListener(this)); this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this)); - this.warmer = new IndexWarmer(indexSettings.getSettings(), nodeServicesProvider.getThreadPool(), bitsetFilterCache.createListener(nodeServicesProvider.getThreadPool())); - this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache); + PercolatorQueryCache percolatorQueryCache = new PercolatorQueryCache(indexSettings, IndexService.this::newQueryShardContext); + this.warmer = new IndexWarmer(indexSettings.getSettings(), nodeServicesProvider.getThreadPool(), bitsetFilterCache.createListener(nodeServicesProvider.getThreadPool()), percolatorQueryCache.createListener(nodeServicesProvider.getThreadPool())); + this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache, percolatorQueryCache); this.engineFactory = engineFactory; // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE this.searcherWrapper = wrapperFactory.newWrapper(this); @@ -230,7 +232,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC } } } finally { - IOUtils.close(bitsetFilterCache, indexCache, mapperService, indexFieldData, analysisService, refreshTask, fsyncTask); + IOUtils.close(bitsetFilterCache, indexCache, mapperService, indexFieldData, analysisService, refreshTask, fsyncTask, cache().getPercolatorQueryCache()); } } } @@ -420,7 +422,11 @@ public final class IndexService extends AbstractIndexComponent implements IndexC * Creates a new QueryShardContext. 
The context has no types set yet; if types are required, set them via {@link QueryShardContext#setTypes(String...)} */ public QueryShardContext newQueryShardContext() { - return new QueryShardContext(indexSettings, indexCache.bitsetFilterCache(), indexFieldData, mapperService(), similarityService(), nodeServicesProvider.getScriptService(), nodeServicesProvider.getIndicesQueriesRegistry()); + return new QueryShardContext( + indexSettings, indexCache.bitsetFilterCache(), indexFieldData, mapperService(), + similarityService(), nodeServicesProvider.getScriptService(), nodeServicesProvider.getIndicesQueriesRegistry(), + indexCache.getPercolatorQueryCache() + ); } ThreadPool getThreadPool() { diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index b17b8ab7edf..b996e70b1e5 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -36,7 +37,6 @@ import org.elasticsearch.index.translog.Translog; import java.util.Locale; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; @@ -50,15 +50,26 @@ import java.util.function.Predicate; */ public final class IndexSettings { - public static final Setting<String> DEFAULT_FIELD_SETTING = new Setting<>("index.query.default_field", AllFieldMapper.NAME, Function.identity(), false, Setting.Scope.INDEX); - public static final Setting<Boolean> QUERY_STRING_LENIENT_SETTING = Setting.boolSetting("index.query_string.lenient", false, false, Setting.Scope.INDEX); - public static final Setting<Boolean> QUERY_STRING_ANALYZE_WILDCARD = Setting.boolSetting("indices.query.query_string.analyze_wildcard", false, false, Setting.Scope.CLUSTER); - public static final Setting<Boolean> QUERY_STRING_ALLOW_LEADING_WILDCARD = Setting.boolSetting("indices.query.query_string.allowLeadingWildcard", true, false, Setting.Scope.CLUSTER); - public static final Setting<Boolean> ALLOW_UNMAPPED = Setting.boolSetting("index.query.parse.allow_unmapped_fields", true, false, Setting.Scope.INDEX); - public static final Setting<TimeValue> INDEX_TRANSLOG_SYNC_INTERVAL_SETTING = Setting.timeSetting("index.translog.sync_interval", TimeValue.timeValueSeconds(5), TimeValue.timeValueMillis(100), false, Setting.Scope.INDEX); - public static final Setting<Translog.Durability> INDEX_TRANSLOG_DURABILITY_SETTING = new Setting<>("index.translog.durability", Translog.Durability.REQUEST.name(), (value) -> Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT)), true, Setting.Scope.INDEX); - public static final Setting<Boolean> INDEX_WARMER_ENABLED_SETTING = Setting.boolSetting("index.warmer.enabled", true, true, Setting.Scope.INDEX); - public static final Setting<Boolean> INDEX_TTL_DISABLE_PURGE_SETTING = Setting.boolSetting("index.ttl.disable_purge", false, true, Setting.Scope.INDEX); + public static final Setting<String> DEFAULT_FIELD_SETTING = + new Setting<>("index.query.default_field", AllFieldMapper.NAME, Function.identity(), Property.IndexScope); + public static final Setting<Boolean> QUERY_STRING_LENIENT_SETTING = + Setting.boolSetting("index.query_string.lenient", false, Property.IndexScope); + public static final Setting<Boolean> QUERY_STRING_ANALYZE_WILDCARD = + Setting.boolSetting("indices.query.query_string.analyze_wildcard", false, Property.NodeScope); + public static final Setting<Boolean> QUERY_STRING_ALLOW_LEADING_WILDCARD = + Setting.boolSetting("indices.query.query_string.allowLeadingWildcard", true, Property.NodeScope); + public static final Setting<Boolean> ALLOW_UNMAPPED = + Setting.boolSetting("index.query.parse.allow_unmapped_fields", true, Property.IndexScope); + public static final Setting<TimeValue> INDEX_TRANSLOG_SYNC_INTERVAL_SETTING = + Setting.timeSetting("index.translog.sync_interval", TimeValue.timeValueSeconds(5), TimeValue.timeValueMillis(100), + Property.IndexScope); + public static final Setting<Translog.Durability> INDEX_TRANSLOG_DURABILITY_SETTING = + new Setting<>("index.translog.durability", Translog.Durability.REQUEST.name(), + (value) -> Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT)), Property.Dynamic, Property.IndexScope); + public static final Setting<Boolean> INDEX_WARMER_ENABLED_SETTING = + Setting.boolSetting("index.warmer.enabled", true, Property.Dynamic, Property.IndexScope); + public static final Setting<Boolean> INDEX_TTL_DISABLE_PURGE_SETTING = + Setting.boolSetting("index.ttl.disable_purge", false, Property.Dynamic, Property.IndexScope); public static final Setting<String> INDEX_CHECK_ON_STARTUP = new Setting<>("index.shard.check_on_startup", "false", (s) -> { switch(s) { case "false": @@ -69,7 +80,7 @@ public final class IndexSettings { default: throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, fix, checksum] but was: " + s); } - }, false, Setting.Scope.INDEX); + }, Property.IndexScope); /** * Index setting describing the maximum value of from + size on a query. @@ -79,10 +90,15 @@ public final class IndexSettings { * safely. 1,000,000 is probably way too high for any cluster to set * safely. 
*/ - public static final Setting<Integer> MAX_RESULT_WINDOW_SETTING = Setting.intSetting("index.max_result_window", 10000, 1, true, Setting.Scope.INDEX); + public static final Setting<Integer> MAX_RESULT_WINDOW_SETTING = + Setting.intSetting("index.max_result_window", 10000, 1, Property.Dynamic, Property.IndexScope); public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS); - public static final Setting<TimeValue> INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.refresh_interval", DEFAULT_REFRESH_INTERVAL, new TimeValue(-1, TimeUnit.MILLISECONDS), true, Setting.Scope.INDEX); - public static final Setting<ByteSizeValue> INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB), true, Setting.Scope.INDEX); + public static final Setting<TimeValue> INDEX_REFRESH_INTERVAL_SETTING = + Setting.timeSetting("index.refresh_interval", DEFAULT_REFRESH_INTERVAL, new TimeValue(-1, TimeUnit.MILLISECONDS), + Property.Dynamic, Property.IndexScope); + public static final Setting<ByteSizeValue> INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING = + Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB), Property.Dynamic, + Property.IndexScope); /** @@ -90,7 +106,9 @@ public final class IndexSettings { * This setting is realtime updateable */ public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60); - public static final Setting<TimeValue> INDEX_GC_DELETES_SETTING = Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), true, Setting.Scope.INDEX); + public static final Setting<TimeValue> INDEX_GC_DELETES_SETTING = + Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic, + Property.IndexScope); private final Index index; private final Version version; diff --git a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java index 9fabc8efc40..332fcdd380e 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java +++ b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java @@ -19,14 +19,10 @@ package org.elasticsearch.index; -import com.carrotsearch.hppc.ObjectHashSet; -import com.carrotsearch.hppc.ObjectSet; -import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NumericDocValues; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.engine.Engine; @@ -56,14 +52,13 @@ public final class IndexWarmer extends AbstractComponent { public static final Setting<MappedFieldType.Loading> INDEX_NORMS_LOADING_SETTING = new Setting<>("index.norms.loading", MappedFieldType.Loading.LAZY.toString(), (s) -> MappedFieldType.Loading.parse(s, MappedFieldType.Loading.LAZY), - false, Setting.Scope.INDEX); + Property.IndexScope); private final List<Listener> listeners; IndexWarmer(Settings settings, ThreadPool threadPool, Listener... listeners) { super(settings); ArrayList<Listener> list = new ArrayList<>(); final Executor executor = threadPool.executor(ThreadPool.Names.WARMER); - list.add(new NormsWarmer(executor)); list.add(new FieldDataWarmer(executor)); for (Listener listener : listeners) { list.add(listener); @@ -137,64 +132,6 @@ public final class IndexWarmer extends AbstractComponent { TerminationHandle warmTopReader(IndexShard indexShard, Engine.Searcher searcher); } - private static class NormsWarmer implements IndexWarmer.Listener { - private final Executor executor; - public NormsWarmer(Executor executor) { - this.executor = executor; - } - @Override - public TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) { - final MappedFieldType.Loading defaultLoading = indexShard.indexSettings().getValue(INDEX_NORMS_LOADING_SETTING); - final MapperService mapperService = indexShard.mapperService(); - final ObjectSet<String> warmUp = new ObjectHashSet<>(); - for (DocumentMapper docMapper : mapperService.docMappers(false)) { - for (FieldMapper fieldMapper : docMapper.mappers()) { - final String indexName = fieldMapper.fieldType().name(); - MappedFieldType.Loading normsLoading = fieldMapper.fieldType().normsLoading(); - if (normsLoading == null) { - normsLoading = defaultLoading; - } - if (fieldMapper.fieldType().indexOptions() != IndexOptions.NONE && !fieldMapper.fieldType().omitNorms() - && normsLoading == MappedFieldType.Loading.EAGER) { - warmUp.add(indexName); - } - } - } - - final CountDownLatch latch = new CountDownLatch(1); - // Norms loading may be I/O intensive but is not CPU intensive, so we execute it in a single task - executor.execute(() -> { - try { - for (ObjectCursor<String> stringObjectCursor : warmUp) { - final String indexName = stringObjectCursor.value; - final long start = System.nanoTime(); - for (final LeafReaderContext ctx : searcher.reader().leaves()) { - final NumericDocValues values = ctx.reader().getNormValues(indexName); - if (values != null) { - values.get(0); - } - } - if (indexShard.warmerService().logger().isTraceEnabled()) { - indexShard.warmerService().logger().trace("warmed norms for [{}], took [{}]", indexName, - TimeValue.timeValueNanos(System.nanoTime() - start)); - } - } - } catch (Throwable t) { - indexShard.warmerService().logger().warn("failed to warm-up norms", t); - } finally { - latch.countDown(); - } - }); - - return () -> latch.await(); - } - - @Override - public TerminationHandle warmTopReader(IndexShard indexShard, final Engine.Searcher searcher) { - return TerminationHandle.NO_WAIT; - } - } - private static class FieldDataWarmer implements IndexWarmer.Listener { private final Executor executor; diff --git a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 5452daa7f07..ff10179f026 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.engine.Engine; @@ -36,6 +37,7 @@ import java.util.concurrent.TimeUnit; /** */ public final class IndexingSlowLog implements IndexingOperationListener { + private final Index index; private boolean reformat; private long indexWarnThreshold; private long indexInfoThreshold; @@ -51,15 +53,25 @@ public final class IndexingSlowLog implements IndexingOperationListener { private SlowLogLevel level; private final ESLogger indexLogger; - private final ESLogger deleteLogger; private static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog"; - public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting<Boolean> INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING = Setting.boolSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".reformat", true, true, Setting.Scope.INDEX); - public static final Setting<SlowLogLevel> INDEX_INDEXING_SLOWLOG_LEVEL_SETTING = new Setting<>(INDEX_INDEXING_SLOWLOG_PREFIX +".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, true, Setting.Scope.INDEX); + public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = + Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING = + Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.info", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING = + Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.debug", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING = + Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.trace", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting<Boolean> INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING = + Setting.boolSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".reformat", true, Property.Dynamic, Property.IndexScope); + public static final Setting<SlowLogLevel> INDEX_INDEXING_SLOWLOG_LEVEL_SETTING = + new Setting<>(INDEX_INDEXING_SLOWLOG_PREFIX +".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, Property.Dynamic, + Property.IndexScope); /** * Reads how much of the source to log. 
The user can specify any value they * like and numbers are interpreted as the maximum number of characters to log @@ -72,19 +84,11 @@ public final class IndexingSlowLog implements IndexingOperationListener { } catch (NumberFormatException e) { return Booleans.parseBoolean(value, true) ? Integer.MAX_VALUE : 0; } - }, true, Setting.Scope.INDEX); + }, Property.Dynamic, Property.IndexScope); IndexingSlowLog(IndexSettings indexSettings) { - this(indexSettings, Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index"), - Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".delete")); - } - - /** - * Build with the specified loggers. Only used to testing. - */ - IndexingSlowLog(IndexSettings indexSettings, ESLogger indexLogger, ESLogger deleteLogger) { - this.indexLogger = indexLogger; - this.deleteLogger = deleteLogger; + this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings()); + this.index = indexSettings.getIndex(); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat); this.reformat = indexSettings.getValue(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING); @@ -109,7 +113,6 @@ public final class IndexingSlowLog implements IndexingOperationListener { private void setLevel(SlowLogLevel level) { this.level = level; this.indexLogger.setLevel(level.name()); - this.deleteLogger.setLevel(level.name()); } private void setWarnThreshold(TimeValue warnThreshold) { @@ -141,13 +144,13 @@ public final class IndexingSlowLog implements IndexingOperationListener { private void postIndexing(ParsedDocument doc, long tookInNanos) { if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) { - indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) { - indexLogger.info("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.info("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) { - indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) { - indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } } @@ -156,9 +159,11 @@ public final class IndexingSlowLog implements IndexingOperationListener { private final long tookInNanos; private final boolean reformat; private final int maxSourceCharsToLog; + private final Index index; - SlowLogParsedDocumentPrinter(ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) { + SlowLogParsedDocumentPrinter(Index index, ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) { this.doc = doc; + this.index = index; this.tookInNanos = tookInNanos; this.reformat = reformat; this.maxSourceCharsToLog = maxSourceCharsToLog; }
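The final hunk below prepends the index to each slow-log line, making entries attributable when several indices log to the same file. Roughly what a resulting prefix looks like (a sketch; the uuid and timings are invented, and Index.toString() renders "[name/uuid]" as introduced earlier in this patch):

    import org.elasticsearch.index.Index;

    class SlowLogPrefixSketch {
        static String exampleLine() {
            // mirrors the StringBuilder usage in the hunk that follows
            StringBuilder sb = new StringBuilder();
            sb.append(new Index("orders", "BPHh3EhLRqqZJJ1hGX9LrQ")).append(" ");
            sb.append("took[2.3s], took_millis[2300], type[doc], id[42]");
            return sb.toString(); // "[orders/BPHh3EhLRqqZJJ1hGX9LrQ] took[2.3s], ..."
        }
    }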
@Override public String toString() { StringBuilder sb = new StringBuilder(); + sb.append(index).append(" "); sb.append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], "); sb.append("type[").append(doc.type()).append("], "); sb.append("id[").append(doc.id()).append("], "); diff --git a/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java b/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java index fc9f30cf3fd..c8d82eae888 100644 --- a/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java +++ b/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.TieredMergePolicy; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -126,15 +127,31 @@ public final class MergePolicyConfig { public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d; - public static final Setting INDEX_COMPOUND_FORMAT_SETTING = new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), MergePolicyConfig::parseNoCFSRatio, true, Setting.Scope.INDEX); + public static final Setting INDEX_COMPOUND_FORMAT_SETTING = + new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), MergePolicyConfig::parseNoCFSRatio, + Property.Dynamic, Property.IndexScope); - public static final Setting INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING = Setting.doubleSetting("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING = Setting.byteSizeSetting("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING = Setting.intSetting("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE, 2, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING = Setting.intSetting("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT, 2, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING = Setting.doubleSetting("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER, 2.0d, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING = Setting.doubleSetting("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT, 0.0d, true, Setting.Scope.INDEX); + public static final Setting INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING = + Setting.doubleSetting("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d, + Property.Dynamic, Property.IndexScope); + public static final Setting 
INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING = + Setting.byteSizeSetting("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING = + Setting.intSetting("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE, 2, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING = + Setting.intSetting("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT, 2, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING = + Setting.byteSizeSetting("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING = + Setting.doubleSetting("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER, 2.0d, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING = + Setting.doubleSetting("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT, 0.0d, + Property.Dynamic, Property.IndexScope); public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; // don't convert to Setting<> and register... we only set this in tests and register via a plugin diff --git a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java index 0d212a4eb30..2eb43a50ee4 100644 --- a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java +++ b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java @@ -21,6 +21,7 @@ package org.elasticsearch.index; import org.apache.lucene.index.ConcurrentMergeScheduler; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.concurrent.EsExecutors; /** @@ -51,9 +52,17 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; */ public final class MergeSchedulerConfig { - public static final Setting MAX_THREAD_COUNT_SETTING = new Setting<>("index.merge.scheduler.max_thread_count", (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))), (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"), true, Setting.Scope.INDEX); - public static final Setting MAX_MERGE_COUNT_SETTING = new Setting<>("index.merge.scheduler.max_merge_count", (s) -> Integer.toString(MAX_THREAD_COUNT_SETTING.get(s) + 5), (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_merge_count"), true, Setting.Scope.INDEX); - public static final Setting AUTO_THROTTLE_SETTING = Setting.boolSetting("index.merge.scheduler.auto_throttle", true, true, Setting.Scope.INDEX); + public static final Setting MAX_THREAD_COUNT_SETTING = + new Setting<>("index.merge.scheduler.max_thread_count", + (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))), + (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"), Property.Dynamic, + Property.IndexScope); + public static final Setting MAX_MERGE_COUNT_SETTING = + new Setting<>("index.merge.scheduler.max_merge_count", + (s) -> Integer.toString(MAX_THREAD_COUNT_SETTING.get(s) + 5), + (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_merge_count"), Property.Dynamic, Property.IndexScope); + 
public static final Setting AUTO_THROTTLE_SETTING = + Setting.boolSetting("index.merge.scheduler.auto_throttle", true, Property.Dynamic, Property.IndexScope); private volatile boolean autoThrottle; private volatile int maxThreadCount; diff --git a/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java index df3139fe57c..cfa779d64aa 100644 --- a/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.internal.SearchContext; @@ -50,16 +51,35 @@ public final class SearchSlowLog { private final ESLogger fetchLogger; private static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog"; - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_REFORMAT = Setting.boolSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".reformat", true, true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_LEVEL = new Setting<>(INDEX_SEARCH_SLOWLOG_PREFIX + ".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, true, Setting.Scope.INDEX); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + 
".threshold.query.warn", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.info", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.debug", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.trace", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.warn", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.info", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.debug", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.trace", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_REFORMAT = + Setting.boolSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".reformat", true, Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_LEVEL = + new Setting<>(INDEX_SEARCH_SLOWLOG_PREFIX + ".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, Property.Dynamic, + Property.IndexScope); public SearchSlowLog(IndexSettings indexSettings) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java index a27b49b9618..b7481e78496 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -20,8 +20,8 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.NumericTokenStream; import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.LegacyNumericTokenStream; import org.apache.lucene.analysis.ar.ArabicAnalyzer; import org.apache.lucene.analysis.bg.BulgarianAnalyzer; import org.apache.lucene.analysis.br.BrazilianAnalyzer; @@ -300,7 +300,7 @@ public class Analysis { *

Although most analyzers generate character terms (CharTermAttribute), * some tokens only contain binary terms (BinaryTermAttribute, * CharTermAttribute being a special type of BinaryTermAttribute), such as - * {@link NumericTokenStream} and unsuitable for highlighting and + * {@link LegacyNumericTokenStream}, and are unsuitable for highlighting and * more-like-this queries which expect character terms.
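A rough sketch of the check this paragraph describes (illustrative only, not part of this change): a token stream is safe for highlighting and more-like-this exactly when it exposes CharTermAttribute. The helper name producesCharacterTerms is hypothetical; TokenStream.hasAttribute and CharTermAttribute are existing Lucene APIs.

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

final class TokenStreamChecks {
    // Binary-only streams such as LegacyNumericTokenStream never register
    // CharTermAttribute, so this returns false for them.
    static boolean producesCharacterTerms(TokenStream stream) {
        return stream.hasAttribute(CharTermAttribute.class);
    }
}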

*/ public static boolean isCharacterTokenStream(TokenStream tokenStream) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index a8a7b4fe004..3c2d6bfb260 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -85,6 +85,10 @@ public final class AnalysisRegistry implements Closeable { this.analyzers = Collections.unmodifiableMap(analyzerBuilder); } + public HunspellService getHunspellService() { + return hunspellService; + } + /** * Returns a registered {@link TokenizerFactory} provider by name or null if the tokenizer was not registered */ diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java index 09e96f3743b..453552b9dd1 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java @@ -155,7 +155,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable // because analyzers are aliased, they might be closed several times // an NPE is thrown in this case, so ignore.... } catch (Exception e) { - logger.debug("failed to close analyzer " + analyzer); + logger.debug("failed to close analyzer {}", analyzer); } } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java index f28f374220a..82ed526323d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java @@ -21,10 +21,8 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; -import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenFilter; import org.apache.lucene.analysis.ngram.NGramTokenFilter; import org.apache.lucene.analysis.reverse.ReverseStringFilter; -import org.apache.lucene.util.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -43,14 +41,11 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory { public static final int SIDE_BACK = 2; private final int side; - private org.elasticsearch.Version esVersion; - public EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE); this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE); this.side = parseSide(settings.get("side", "front")); - this.esVersion = org.elasticsearch.Version.indexCreated(indexSettings.getSettings()); } static int parseSide(String side) { @@ -70,15 +65,7 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory { result = new ReverseStringFilter(result); } - if (version.onOrAfter(Version.LUCENE_4_3) && esVersion.onOrAfter(org.elasticsearch.Version.V_0_90_2)) { - /* - * We added this in 0.90.2 but 0.90.1 used LUCENE_43 already so we can not rely on the lucene version. 
- * Yet if somebody uses 0.90.2 or higher with a prev. lucene version we should also use the deprecated version. - */ - result = new EdgeNGramTokenFilter(result, minGram, maxGram); - } else { - result = new Lucene43EdgeNGramTokenFilter(result, minGram, maxGram); - } + result = new EdgeNGramTokenFilter(result, minGram, maxGram); // side=BACK is not supported anymore but applying ReverseStringFilter up-front and after the token filter has the same effect if (side == SIDE_BACK) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java index 2c50d8d4d66..77d122393ce 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java @@ -21,9 +21,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer; -import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer; import org.apache.lucene.analysis.ngram.NGramTokenizer; -import org.apache.lucene.util.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -33,55 +31,33 @@ import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenC /** * */ -@SuppressWarnings("deprecation") public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory { private final int minGram; private final int maxGram; - private final Lucene43EdgeNGramTokenizer.Side side; - private final CharMatcher matcher; - - protected org.elasticsearch.Version esVersion; public EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE); this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); - this.side = Lucene43EdgeNGramTokenizer.Side.getSide(settings.get("side", Lucene43EdgeNGramTokenizer.DEFAULT_SIDE.getLabel())); this.matcher = parseTokenChars(settings.getAsArray("token_chars")); - this.esVersion = indexSettings.getIndexVersionCreated(); } @Override public Tokenizer create() { - if (version.onOrAfter(Version.LUCENE_4_3) && esVersion.onOrAfter(org.elasticsearch.Version.V_0_90_2)) { - /* - * We added this in 0.90.2 but 0.90.1 used LUCENE_43 already so we can not rely on the lucene version. - * Yet if somebody uses 0.90.2 or higher with a prev. lucene version we should also use the deprecated version. - */ - if (side == Lucene43EdgeNGramTokenizer.Side.BACK) { - throw new IllegalArgumentException("side=back is not supported anymore. Please fix your analysis chain or use" - + " an older compatibility version (<=4.2) but beware that it might cause highlighting bugs." - + " To obtain the same behavior as the previous version please use \"edgeNGram\" filter which still supports side=back" - + " in combination with a \"keyword\" tokenizer"); - } - final Version version = this.version == Version.LUCENE_4_3 ? 
Version.LUCENE_4_4 : this.version; // always use 4.4 or higher - if (matcher == null) { - return new EdgeNGramTokenizer(minGram, maxGram); - } else { - return new EdgeNGramTokenizer(minGram, maxGram) { - @Override - protected boolean isTokenChar(int chr) { - return matcher.isTokenChar(chr); - } - }; - } + if (matcher == null) { + return new EdgeNGramTokenizer(minGram, maxGram); } else { - return new Lucene43EdgeNGramTokenizer(side, minGram, maxGram); + return new EdgeNGramTokenizer(minGram, maxGram) { + @Override + protected boolean isTokenChar(int chr) { + return matcher.isTokenChar(chr); + } + }; } } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java index 82b8df70741..ab00657313d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.KeepWordFilter; -import org.apache.lucene.analysis.miscellaneous.Lucene43KeepWordFilter; import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.util.Version; import org.elasticsearch.common.settings.Settings; @@ -40,9 +39,6 @@ import org.elasticsearch.index.IndexSettings; *
• {@value #KEEP_WORDS_PATH_KEY} a reference to a file containing the words * / tokens to keep. Note: this is an alternative to {@value #KEEP_WORDS_KEY}; if * both are set, an exception will be thrown.
  • - *
  • {@value #ENABLE_POS_INC_KEY} true iff the filter should - * maintain position increments for dropped tokens. The default is - * true.
  • *
• {@value #KEEP_WORDS_CASE_KEY} whether to match the keep words case-insensitively. The * default is false, which corresponds to case-sensitive matching.
  • * @@ -51,10 +47,11 @@ import org.elasticsearch.index.IndexSettings; */ public class KeepWordFilterFactory extends AbstractTokenFilterFactory { private final CharArraySet keepWords; - private final boolean enablePositionIncrements; private static final String KEEP_WORDS_KEY = "keep_words"; private static final String KEEP_WORDS_PATH_KEY = KEEP_WORDS_KEY + "_path"; private static final String KEEP_WORDS_CASE_KEY = KEEP_WORDS_KEY + "_case"; // for javadoc + + // unsupported ancient option private static final String ENABLE_POS_INC_KEY = "enable_position_increments"; public KeepWordFilterFactory(IndexSettings indexSettings, @@ -68,26 +65,14 @@ public class KeepWordFilterFactory extends AbstractTokenFilterFactory { throw new IllegalArgumentException("keep requires either `" + KEEP_WORDS_KEY + "` or `" + KEEP_WORDS_PATH_KEY + "` to be configured"); } - if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) { - throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use" - + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs."); + if (settings.get(ENABLE_POS_INC_KEY) != null) { + throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain"); } - enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4) ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true); - this.keepWords = Analysis.getWordSet(env, settings, KEEP_WORDS_KEY); - } @Override public TokenStream create(TokenStream tokenStream) { - if (version.onOrAfter(Version.LUCENE_4_4)) { - return new KeepWordFilter(tokenStream, keepWords); - } else { - @SuppressWarnings("deprecation") - final TokenStream filter = new Lucene43KeepWordFilter(enablePositionIncrements, tokenStream, keepWords); - return filter; - } + return new KeepWordFilter(tokenStream, keepWords); } - - } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java index 3af93bc79de..e55e24ccae0 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java @@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.LengthFilter; -import org.apache.lucene.analysis.miscellaneous.Lucene43LengthFilter; -import org.apache.lucene.util.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -34,28 +32,21 @@ public class LengthTokenFilterFactory extends AbstractTokenFilterFactory { private final int min; private final int max; - private final boolean enablePositionIncrements; + + // ancient unsupported option private static final String ENABLE_POS_INC_KEY = "enable_position_increments"; public LengthTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); min = settings.getAsInt("min", 0); max = settings.getAsInt("max", Integer.MAX_VALUE); - if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) { - throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. 
Please fix your analysis chain or use" - + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs."); + if (settings.get(ENABLE_POS_INC_KEY) != null) { + throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain"); } - enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4) ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true); } @Override public TokenStream create(TokenStream tokenStream) { - if (version.onOrAfter(Version.LUCENE_4_4)) { - return new LengthFilter(tokenStream, min, max); - } else { - @SuppressWarnings("deprecation") - final TokenStream filter = new Lucene43LengthFilter(enablePositionIncrements, tokenStream, min, max); - return filter; - } + return new LengthFilter(tokenStream, min, max); } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java index 80e0aeb32eb..0905b310735 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java @@ -20,9 +20,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.ngram.Lucene43NGramTokenFilter; import org.apache.lucene.analysis.ngram.NGramTokenFilter; -import org.apache.lucene.util.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -44,14 +42,8 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE); } - @SuppressWarnings("deprecation") @Override public TokenStream create(TokenStream tokenStream) { - final Version version = this.version == Version.LUCENE_4_3 ? Version.LUCENE_4_4 : this.version; // we supported it since 4.3 - if (version.onOrAfter(Version.LUCENE_4_3)) { - return new NGramTokenFilter(tokenStream, minGram, maxGram); - } else { - return new Lucene43NGramTokenFilter(tokenStream, minGram, maxGram); - } + return new NGramTokenFilter(tokenStream, minGram, maxGram); } } \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java index 25ff8f96834..1dd562c4bb1 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java @@ -93,7 +93,7 @@ public class NamedAnalyzer extends DelegatingAnalyzerWrapper { public String toString() { return "analyzer name[" + name + "], analyzer [" + analyzer + "]"; } - + /** It is an error if this is ever used, it means we screwed up! 
*/ static final ReuseStrategy ERROR_STRATEGY = new Analyzer.ReuseStrategy() { @Override diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NumericDateTokenizer.java b/core/src/main/java/org/elasticsearch/index/analysis/NumericDateTokenizer.java index 03b502d4478..21a13eab573 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NumericDateTokenizer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NumericDateTokenizer.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.analysis; -import org.apache.lucene.analysis.NumericTokenStream; +import org.apache.lucene.analysis.LegacyNumericTokenStream; import org.joda.time.format.DateTimeFormatter; import java.io.IOException; @@ -30,11 +30,11 @@ import java.io.IOException; public class NumericDateTokenizer extends NumericTokenizer { public NumericDateTokenizer(int precisionStep, char[] buffer, DateTimeFormatter dateTimeFormatter) throws IOException { - super(new NumericTokenStream(precisionStep), buffer, dateTimeFormatter); + super(new LegacyNumericTokenStream(precisionStep), buffer, dateTimeFormatter); } @Override - protected void setValue(NumericTokenStream tokenStream, String value) { + protected void setValue(LegacyNumericTokenStream tokenStream, String value) { tokenStream.setLongValue(((DateTimeFormatter) extra).parseMillis(value)); } } \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java index e90409421d2..77716e7a43d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java @@ -56,4 +56,4 @@ public class NumericDoubleAnalyzer extends NumericAnalyzer protected NumericFloatTokenizer createNumericTokenizer(char[] buffer) throws IOException { return new NumericFloatTokenizer(precisionStep, buffer); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NumericFloatTokenizer.java b/core/src/main/java/org/elasticsearch/index/analysis/NumericFloatTokenizer.java index 02d42b8eef8..b7b2f6577f9 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NumericFloatTokenizer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NumericFloatTokenizer.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.analysis; -import org.apache.lucene.analysis.NumericTokenStream; +import org.apache.lucene.analysis.LegacyNumericTokenStream; import java.io.IOException; @@ -29,11 +29,11 @@ import java.io.IOException; public class NumericFloatTokenizer extends NumericTokenizer { public NumericFloatTokenizer(int precisionStep, char[] buffer) throws IOException { - super(new NumericTokenStream(precisionStep), buffer, null); + super(new LegacyNumericTokenStream(precisionStep), buffer, null); } @Override - protected void setValue(NumericTokenStream tokenStream, String value) { + protected void setValue(LegacyNumericTokenStream tokenStream, String value) { tokenStream.setFloatValue(Float.parseFloat(value)); } } \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NumericIntegerTokenizer.java b/core/src/main/java/org/elasticsearch/index/analysis/NumericIntegerTokenizer.java index 3f758c4900e..3d8b1309997 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NumericIntegerTokenizer.java +++ 
b/core/src/main/java/org/elasticsearch/index/analysis/NumericIntegerTokenizer.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.analysis; -import org.apache.lucene.analysis.NumericTokenStream; +import org.apache.lucene.analysis.LegacyNumericTokenStream; import java.io.IOException; @@ -29,11 +29,11 @@ import java.io.IOException; public class NumericIntegerTokenizer extends NumericTokenizer { public NumericIntegerTokenizer(int precisionStep, char[] buffer) throws IOException { - super(new NumericTokenStream(precisionStep), buffer, null); + super(new LegacyNumericTokenStream(precisionStep), buffer, null); } @Override - protected void setValue(NumericTokenStream tokenStream, String value) { + protected void setValue(LegacyNumericTokenStream tokenStream, String value) { tokenStream.setIntValue(Integer.parseInt(value)); } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java index ab112396392..9b865920341 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java @@ -56,4 +56,4 @@ public class NumericLongAnalyzer extends NumericAnalyzer { protected NumericLongTokenizer createNumericTokenizer(char[] buffer) throws IOException { return new NumericLongTokenizer(precisionStep, buffer); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NumericLongTokenizer.java b/core/src/main/java/org/elasticsearch/index/analysis/NumericLongTokenizer.java index d926371ca48..63abd2d9ed4 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NumericLongTokenizer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NumericLongTokenizer.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.analysis; -import org.apache.lucene.analysis.NumericTokenStream; +import org.apache.lucene.analysis.LegacyNumericTokenStream; import java.io.IOException; @@ -29,11 +29,11 @@ import java.io.IOException; public class NumericLongTokenizer extends NumericTokenizer { public NumericLongTokenizer(int precisionStep, char[] buffer) throws IOException { - super(new NumericTokenStream(precisionStep), buffer, null); + super(new LegacyNumericTokenStream(precisionStep), buffer, null); } @Override - protected void setValue(NumericTokenStream tokenStream, String value) { + protected void setValue(LegacyNumericTokenStream tokenStream, String value) { tokenStream.setLongValue(Long.parseLong(value)); } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NumericTokenizer.java b/core/src/main/java/org/elasticsearch/index/analysis/NumericTokenizer.java index ccd87628988..6339b11636e 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NumericTokenizer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NumericTokenizer.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.analysis; -import org.apache.lucene.analysis.NumericTokenStream; +import org.apache.lucene.analysis.LegacyNumericTokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.util.Attribute; import org.apache.lucene.util.AttributeFactory; @@ -45,12 +45,12 @@ public abstract class NumericTokenizer extends Tokenizer { }; } - private final NumericTokenStream numericTokenStream; + private final LegacyNumericTokenStream numericTokenStream; private final char[] buffer; protected final Object extra; private boolean started; - 
protected NumericTokenizer(NumericTokenStream numericTokenStream, char[] buffer, Object extra) throws IOException { + protected NumericTokenizer(LegacyNumericTokenStream numericTokenStream, char[] buffer, Object extra) throws IOException { super(delegatingAttributeFactory(numericTokenStream)); this.numericTokenStream = numericTokenStream; // Add attributes from the numeric token stream, this works fine because the attribute factory delegates to numericTokenStream @@ -95,5 +95,5 @@ public abstract class NumericTokenizer extends Tokenizer { numericTokenStream.close(); } - protected abstract void setValue(NumericTokenStream tokenStream, String value); + protected abstract void setValue(LegacyNumericTokenStream tokenStream, String value); } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java index 74150c13bf6..f00988f4ad2 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java @@ -40,13 +40,7 @@ public class PatternAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final StandardAnalyzer standardAnalyzer; - private final Version esVersion; public StandardAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); - this.esVersion = indexSettings.getIndexVersionCreated(); - final CharArraySet defaultStopwords; - if (esVersion.onOrAfter(Version.V_1_0_0_Beta1)) { - defaultStopwords = CharArraySet.EMPTY_SET; - } else { - defaultStopwords = StopAnalyzer.ENGLISH_STOP_WORDS_SET; - } - + final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET; CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords); int maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH); standardAnalyzer = new StandardAnalyzer(stopWords); diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzer.java index 156ad1ff07e..a755e54db17 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzer.java @@ -26,10 +26,8 @@ import org.apache.lucene.analysis.core.StopAnalyzer; import org.apache.lucene.analysis.core.StopFilter; import org.apache.lucene.analysis.standard.StandardFilter; import org.apache.lucene.analysis.standard.StandardTokenizer; -import org.apache.lucene.analysis.standard.std40.StandardTokenizer40; import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.analysis.util.StopwordAnalyzerBase; -import org.apache.lucene.util.Version; public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase { @@ -47,12 +45,7 @@ public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase { @Override protected TokenStreamComponents createComponents(final String fieldName) { - final Tokenizer src; - if (getVersion().onOrAfter(Version.LUCENE_4_7_0)) { - src = new StandardTokenizer(); - } else { - src = new StandardTokenizer40(); - } + final Tokenizer src = new StandardTokenizer(); TokenStream tok = new StandardFilter(src); tok = new LowerCaseFilter(tok); if (!stopwords.isEmpty()) { diff --git 
a/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java index b2e95737ee1..a3c65b0a17b 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StandardHtmlStripAnalyzerProvider.java @@ -32,17 +32,10 @@ import org.elasticsearch.index.IndexSettings; public class StandardHtmlStripAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final StandardHtmlStripAnalyzer analyzer; - private final Version esVersion; public StandardHtmlStripAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); - this.esVersion = indexSettings.getIndexVersionCreated(); - final CharArraySet defaultStopwords; - if (esVersion.onOrAfter(Version.V_1_0_0_RC1)) { - defaultStopwords = CharArraySet.EMPTY_SET; - } else { - defaultStopwords = StopAnalyzer.ENGLISH_STOP_WORDS_SET; - } + final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET; CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords); analyzer = new StandardHtmlStripAnalyzer(stopWords); analyzer.setVersion(version); diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java index d0702bdbc4b..3f142a1ab43 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StandardTokenizerFactory.java @@ -22,8 +22,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.standard.StandardTokenizer; -import org.apache.lucene.analysis.standard.std40.StandardTokenizer40; -import org.apache.lucene.util.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -41,14 +39,8 @@ public class StandardTokenizerFactory extends AbstractTokenizerFactory { @Override public Tokenizer create() { - if (version.onOrAfter(Version.LUCENE_4_7_0)) { - StandardTokenizer tokenizer = new StandardTokenizer(); - tokenizer.setMaxTokenLength(maxTokenLength); - return tokenizer; - } else { - StandardTokenizer40 tokenizer = new StandardTokenizer40(); - tokenizer.setMaxTokenLength(maxTokenLength); - return tokenizer; - } + StandardTokenizer tokenizer = new StandardTokenizer(); + tokenizer.setMaxTokenLength(maxTokenLength); + return tokenizer; } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java index 1154f9b0f79..317b3e07850 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java @@ -122,11 +122,7 @@ public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory { // English stemmers } else if ("english".equalsIgnoreCase(language)) { - if (indexVersion.onOrAfter(Version.V_1_3_0)) { - return new PorterStemFilter(tokenStream); - } else { - return new SnowballFilter(tokenStream, new EnglishStemmer()); - } + return new PorterStemFilter(tokenStream); } else if 
("light_english".equalsIgnoreCase(language) || "lightEnglish".equalsIgnoreCase(language) || "kstem".equalsIgnoreCase(language)) { return new KStemFilter(tokenStream); @@ -135,11 +131,7 @@ public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory { } else if ("porter".equalsIgnoreCase(language)) { return new PorterStemFilter(tokenStream); } else if ("porter2".equalsIgnoreCase(language)) { - if (indexVersion.onOrAfter(Version.V_1_3_0)) { - return new SnowballFilter(tokenStream, new EnglishStemmer()); - } else { - return new SnowballFilter(tokenStream, new PorterStemmer()); - } + return new SnowballFilter(tokenStream, new EnglishStemmer()); } else if ("minimal_english".equalsIgnoreCase(language) || "minimalEnglish".equalsIgnoreCase(language)) { return new EnglishMinimalStemFilter(tokenStream); } else if ("possessive_english".equalsIgnoreCase(language) || "possessiveEnglish".equalsIgnoreCase(language)) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java index 6ab0c3fc9c8..322fcea452f 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.core.Lucene43StopFilter; import org.apache.lucene.analysis.core.StopAnalyzer; import org.apache.lucene.analysis.core.StopFilter; import org.apache.lucene.analysis.util.CharArraySet; @@ -42,7 +41,6 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory { private final boolean ignoreCase; - private final boolean enablePositionIncrements; private final boolean removeTrailing; public StopTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { @@ -50,21 +48,15 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory { this.ignoreCase = settings.getAsBoolean("ignore_case", false); this.removeTrailing = settings.getAsBoolean("remove_trailing", true); this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase); - if (version.onOrAfter(Version.LUCENE_4_4) && settings.get("enable_position_increments") != null) { - throw new IllegalArgumentException("enable_position_increments is not supported anymore as of Lucene 4.4 as it can create broken token streams." - + " Please fix your analysis chain or use an older compatibility version (<= 4.3)."); + if (settings.get("enable_position_increments") != null) { + throw new IllegalArgumentException("enable_position_increments is not supported anymore. 
Please fix your analysis chain"); } - this.enablePositionIncrements = settings.getAsBoolean("enable_position_increments", true); } @Override public TokenStream create(TokenStream tokenStream) { if (removeTrailing) { - if (version.onOrAfter(Version.LUCENE_4_4)) { - return new StopFilter(tokenStream, stopWords); - } else { - return new Lucene43StopFilter(enablePositionIncrements, tokenStream, stopWords); - } + return new StopFilter(tokenStream, stopWords); } else { return new SuggestStopFilter(tokenStream, stopWords); } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java index a80c36b5a3e..c77467b2b41 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java @@ -20,9 +20,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.miscellaneous.Lucene43TrimFilter; import org.apache.lucene.analysis.miscellaneous.TrimFilter; -import org.apache.lucene.util.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -32,26 +30,17 @@ import org.elasticsearch.index.IndexSettings; */ public class TrimTokenFilterFactory extends AbstractTokenFilterFactory { - private final boolean updateOffsets; private static final String UPDATE_OFFSETS_KEY = "update_offsets"; public TrimTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); - if (version.onOrAfter(Version.LUCENE_4_4_0) && settings.get(UPDATE_OFFSETS_KEY) != null) { - throw new IllegalArgumentException(UPDATE_OFFSETS_KEY + " is not supported anymore. Please fix your analysis chain or use" - + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs."); + if (settings.get(UPDATE_OFFSETS_KEY) != null) { + throw new IllegalArgumentException(UPDATE_OFFSETS_KEY + " is not supported anymore. 
Please fix your analysis chain"); } - this.updateOffsets = settings.getAsBoolean("update_offsets", false); } @Override public TokenStream create(TokenStream tokenStream) { - if (version.onOrAfter(Version.LUCENE_4_4_0)) { - return new TrimFilter(tokenStream); - } else { - @SuppressWarnings("deprecation") - final TokenStream filter = new Lucene43TrimFilter(tokenStream, updateOffsets); - return filter; - } + return new TrimFilter(tokenStream); } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java index 0668409fa07..3e75d214bd3 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java @@ -22,8 +22,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer; -import org.apache.lucene.analysis.standard.std40.UAX29URLEmailTokenizer40; -import org.apache.lucene.util.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -42,14 +40,8 @@ public class UAX29URLEmailTokenizerFactory extends AbstractTokenizerFactory { @Override public Tokenizer create() { - if (version.onOrAfter(Version.LUCENE_4_7)) { - UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(); - tokenizer.setMaxTokenLength(maxTokenLength); - return tokenizer; - } else { - UAX29URLEmailTokenizer40 tokenizer = new UAX29URLEmailTokenizer40(); - tokenizer.setMaxTokenLength(maxTokenLength); - return tokenizer; - } + UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(); + tokenizer.setMaxTokenLength(maxTokenLength); + return tokenizer; } } \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java index 1d5a9563130..118d7f84a11 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactory.java @@ -20,11 +20,9 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.miscellaneous.Lucene47WordDelimiterFilter; import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter; import org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator; import org.apache.lucene.analysis.util.CharArraySet; -import org.apache.lucene.util.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -96,17 +94,10 @@ public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory @Override public TokenStream create(TokenStream tokenStream) { - if (version.onOrAfter(Version.LUCENE_4_8)) { - return new WordDelimiterFilter(tokenStream, + return new WordDelimiterFilter(tokenStream, charTypeTable, flags, protoWords); - } else { - return new Lucene47WordDelimiterFilter(tokenStream, - charTypeTable, - flags, - protoWords); - } } public int getFlag(int flag, Settings settings, String key, boolean defaultValue) { diff --git 
a/core/src/main/java/org/elasticsearch/index/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java index 8d65e008f25..fc9719d36b1 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java @@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis.compound; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.compound.DictionaryCompoundWordTokenFilter; -import org.apache.lucene.analysis.compound.Lucene43DictionaryCompoundWordTokenFilter; -import org.apache.lucene.util.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -41,12 +39,7 @@ public class DictionaryCompoundWordTokenFilterFactory extends AbstractCompoundWo @Override public TokenStream create(TokenStream tokenStream) { - if (version.onOrAfter(Version.LUCENE_4_4_0)) { - return new DictionaryCompoundWordTokenFilter(tokenStream, wordList, minWordSize, - minSubwordSize, maxSubwordSize, onlyLongestMatch); - } else { - return new Lucene43DictionaryCompoundWordTokenFilter(tokenStream, wordList, minWordSize, - minSubwordSize, maxSubwordSize, onlyLongestMatch); - } + return new DictionaryCompoundWordTokenFilter(tokenStream, wordList, minWordSize, + minSubwordSize, maxSubwordSize, onlyLongestMatch); } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java index 42a29784acc..152d4395ef3 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java @@ -21,9 +21,7 @@ package org.elasticsearch.index.analysis.compound; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.compound.HyphenationCompoundWordTokenFilter; -import org.apache.lucene.analysis.compound.Lucene43HyphenationCompoundWordTokenFilter; import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree; -import org.apache.lucene.util.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -60,12 +58,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundW @Override public TokenStream create(TokenStream tokenStream) { - if (version.onOrAfter(Version.LUCENE_4_4_0)) { - return new HyphenationCompoundWordTokenFilter(tokenStream, hyphenationTree, wordList, minWordSize, - minSubwordSize, maxSubwordSize, onlyLongestMatch); - } else { - return new Lucene43HyphenationCompoundWordTokenFilter(tokenStream, hyphenationTree, wordList, minWordSize, - minSubwordSize, maxSubwordSize, onlyLongestMatch); - } + return new HyphenationCompoundWordTokenFilter(tokenStream, hyphenationTree, wordList, minWordSize, + minSubwordSize, maxSubwordSize, onlyLongestMatch); } } diff --git a/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java b/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java index 61733f24695..b41f5bc0125 100644 --- 
a/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java @@ -24,6 +24,7 @@ import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.cache.query.QueryCache; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import java.io.Closeable; import java.io.IOException; @@ -35,11 +36,14 @@ public class IndexCache extends AbstractIndexComponent implements Closeable { private final QueryCache queryCache; private final BitsetFilterCache bitsetFilterCache; + private final PercolatorQueryCache percolatorQueryCache; - public IndexCache(IndexSettings indexSettings, QueryCache queryCache, BitsetFilterCache bitsetFilterCache) { + public IndexCache(IndexSettings indexSettings, QueryCache queryCache, BitsetFilterCache bitsetFilterCache, + PercolatorQueryCache percolatorQueryCache) { super(indexSettings); this.queryCache = queryCache; this.bitsetFilterCache = bitsetFilterCache; + this.percolatorQueryCache = percolatorQueryCache; } public QueryCache query() { @@ -53,9 +57,13 @@ public class IndexCache extends AbstractIndexComponent implements Closeable { return bitsetFilterCache; } + public PercolatorQueryCache getPercolatorQueryCache() { + return percolatorQueryCache; + } + @Override public void close() throws IOException { - IOUtils.close(queryCache, bitsetFilterCache); + IOUtils.close(queryCache, bitsetFilterCache, percolatorQueryCache); } public void clear(String reason) { diff --git a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index f7802330ab7..19ec3c8402e 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.cache.RemovalListener; import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; @@ -70,7 +71,8 @@ import java.util.concurrent.Executor; */ public final class BitsetFilterCache extends AbstractIndexComponent implements LeafReader.CoreClosedListener, RemovalListener>, Closeable { - public static final Setting INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING = Setting.boolSetting("index.load_fixed_bitset_filters_eagerly", true, false, Setting.Scope.INDEX); + public static final Setting INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING = + Setting.boolSetting("index.load_fixed_bitset_filters_eagerly", true, Property.IndexScope); private final boolean loadRandomAccessFiltersEagerly; private final Cache> loadedFilters; diff --git a/core/src/main/java/org/elasticsearch/index/codec/CodecService.java b/core/src/main/java/org/elasticsearch/index/codec/CodecService.java index 432f81da8a9..3edc509b7eb 100644 --- a/core/src/main/java/org/elasticsearch/index/codec/CodecService.java +++ b/core/src/main/java/org/elasticsearch/index/codec/CodecService.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.codec; import org.apache.lucene.codecs.Codec; import 
org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; import org.apache.lucene.codecs.lucene54.Lucene54Codec; +import org.apache.lucene.codecs.lucene60.Lucene60Codec; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.logging.ESLogger; @@ -47,8 +48,8 @@ public class CodecService { public CodecService(@Nullable MapperService mapperService, ESLogger logger) { final MapBuilder codecs = MapBuilder.newMapBuilder(); if (mapperService == null) { - codecs.put(DEFAULT_CODEC, new Lucene54Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene54Codec(Mode.BEST_COMPRESSION)); + codecs.put(DEFAULT_CODEC, new Lucene60Codec()); + codecs.put(BEST_COMPRESSION_CODEC, new Lucene60Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); diff --git a/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java index 7663a322be6..a4977baa1f2 100644 --- a/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -22,7 +22,7 @@ package org.elasticsearch.index.codec; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; -import org.apache.lucene.codecs.lucene54.Lucene54Codec; +import org.apache.lucene.codecs.lucene60.Lucene60Codec; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.mapper.MappedFieldType; @@ -38,7 +38,7 @@ import org.elasticsearch.index.mapper.core.CompletionFieldMapper; * configured for a specific field the default postings format is used. */ // LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version -public class PerFieldMappingPostingFormatCodec extends Lucene54Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene60Codec { private final ESLogger logger; private final MapperService mapperService; diff --git a/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java b/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java index 5a7f481eaad..a2900f649ef 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java +++ b/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.engine; -import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.index.translog.Translog; /** Holds a deleted version, which just adds a timestamp to {@link VersionValue} so we know when we can expire the deletion. 
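The codec bump above (Lucene54Codec to Lucene60Codec) leans on the fact that Lucene's per-field codecs are designed to be subclassed. As a hedged illustration of the hook that PerFieldMappingPostingFormatCodec overrides (the subclass and field name below are made up), a codec can route individual fields to a different postings format:

```java
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene60.Lucene60Codec;

// Hypothetical sketch: Lucene60Codec exposes getPostingsFormatForField() so a subclass
// can choose a postings format per field name; PerFieldMappingPostingFormatCodec uses
// this hook to honour the postings format configured in the field mappings.
public class PerFieldDemoCodec extends Lucene60Codec {
    @Override
    public PostingsFormat getPostingsFormatForField(String field) {
        if ("my_special_field".equals(field)) {         // made-up field name
            return PostingsFormat.forName("Lucene50");  // look up a registered format by name
        }
        return super.getPostingsFormatForField(field);  // default for everything else
    }
}
```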
*/ @@ -44,6 +43,6 @@ class DeleteVersionValue extends VersionValue { @Override public long ramBytesUsed() { - return super.ramBytesUsed() + RamUsageEstimator.NUM_BYTES_LONG; + return super.ramBytesUsed() + Long.BYTES; } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java index 6dd710e4e89..965a2e58f9c 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java @@ -129,9 +129,9 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler { merge.rateLimiter.getMBPerSec()); if (tookMS > 20000) { // if more than 20 seconds, DEBUG log it - logger.debug(message); + logger.debug("{}", message); } else if (logger.isTraceEnabled()) { - logger.trace(message); + logger.trace("{}", message); } } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 3c5583440e0..bb7aa0ea71a 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -671,7 +671,7 @@ public abstract class Engine implements Closeable { closeNoLock("engine failed on: [" + reason + "]"); } finally { if (failedEngine != null) { - logger.debug("tried to fail engine but engine is already failed. ignoring. [{}]", reason, failure); + logger.debug("tried to fail engine but engine is already failed. ignoring. [{}]", failure, reason); return; } logger.warn("failed engine [{}]", failure, reason); @@ -697,7 +697,7 @@ public abstract class Engine implements Closeable { store.decRef(); } } else { - logger.debug("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason, failure); + logger.debug("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", failure, reason); } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 9740ccd0358..a290e98f3f7 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -39,7 +40,7 @@ import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.threadpool.ThreadPool; -import java.util.Set; +import java.util.function.Function; /* * Holds all the configuration that is used to create an {@link Engine}. @@ -70,20 +71,23 @@ public final class EngineConfig { /** * Index setting to change the low level lucene codec used for writing new segments. * This setting is not realtime updateable. + * This setting is also settable on the node and the index level; it's commonly used in hot/cold node architectures where an index + * is likely allocated on both kinds of nodes.
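The logging tweaks above fix two distinct hazards, sketched below under the assumption that ESLogger offers the usual debug(String, Object...) and debug(String, Throwable, Object...) overloads (the surrounding warn("failed engine [{}]", failure, reason) call suggests exactly that). The class and message text here are invented:

```java
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

// Illustrative only; not part of the change itself.
public class LoggingPitfallsDemo {
    private static final ESLogger logger = Loggers.getLogger(LoggingPitfallsDemo.class);

    public static void main(String[] args) {
        // hazard 1: a pre-built message may itself contain a literal "{}"
        String message = "merge done: rate limit was [{}] MB/s"; // invented text
        logger.debug(message);       // wrong: "{}" is parsed as a placeholder with no argument
        logger.debug("{}", message); // right: the message is plain data, logged verbatim

        // hazard 2: the Throwable goes before the "{}" parameters, not after,
        // which is why the Engine.java hunks swap (reason, failure) to (failure, reason)
        Exception failure = new RuntimeException("boom");
        logger.debug("tried to fail engine [{}]", failure, "some reason");
    }
}
```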
*/ - public static final Setting INDEX_CODEC_SETTING = new Setting<>("index.codec", "default", (s) -> { - switch(s) { + public static final Setting INDEX_CODEC_SETTING = new Setting<>("index.codec", "default", s -> { + switch (s) { case "default": case "best_compression": case "lucene_default": return s; default: if (Codec.availableCodecs().contains(s) == false) { // we don't error message the not officially supported ones - throw new IllegalArgumentException("unknown value for [index.codec] must be one of [default, best_compression] but was: " + s); + throw new IllegalArgumentException( + "unknown value for [index.codec] must be one of [default, best_compression] but was: " + s); } return s; } - }, false, Setting.Scope.INDEX); + }, Property.IndexScope, Property.NodeScope); /** if set to true the engine will start even if the translog id in the commit point can not be found */ public static final String INDEX_FORCE_NEW_TRANSLOG = "index.engine.force_new_translog"; @@ -98,7 +102,8 @@ public final class EngineConfig { IndexSettings indexSettings, Engine.Warmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, MergePolicy mergePolicy,Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.EventListener eventListener, - TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, TranslogConfig translogConfig, TimeValue flushMergesAfter) { + TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, + TranslogConfig translogConfig, TimeValue flushMergesAfter) { this.shardId = shardId; final Settings settings = indexSettings.getSettings(); this.indexSettings = indexSettings; @@ -139,7 +144,8 @@ public final class EngineConfig { } /** - * Returns the initial index buffer size. This setting is only read on startup and otherwise controlled by {@link IndexingMemoryController} + * Returns the initial index buffer size. This setting is only read on startup and otherwise controlled + * by {@link IndexingMemoryController} */ public ByteSizeValue getIndexingBufferSize() { return indexingBufferSize; @@ -147,11 +153,12 @@ public final class EngineConfig { /** * Returns true iff delete garbage collection in the engine should be enabled. This setting is updateable - * in realtime and forces a volatile read. Consumers can safely read this value directly go fetch it's latest value. The default is true + * in realtime and forces a volatile read. Consumers can safely read this value directly to fetch its latest value. + * The default is true * <p>
     * Engine GC deletion if enabled collects deleted documents from in-memory realtime data structures after a certain amount of - * time ({@link IndexSettings#getGcDeletesInMillis()} if enabled. Before deletes are GCed they will cause re-adding the document that was deleted - * to fail. + * time ({@link IndexSettings#getGcDeletesInMillis()}) if enabled. Before deletes are GCed they will cause re-adding the document + * that was deleted to fail. * </p>
     */ public boolean isEnableGcDeletes() { @@ -169,7 +176,8 @@ } /** - * Returns a thread-pool mainly used to get estimated time stamps from {@link org.elasticsearch.threadpool.ThreadPool#estimatedTimeInMillis()} and to schedule + * Returns a thread-pool mainly used to get estimated time stamps from + * {@link org.elasticsearch.threadpool.ThreadPool#estimatedTimeInMillis()} and to schedule * async force merge calls on the {@link org.elasticsearch.threadpool.ThreadPool.Names#FORCE_MERGE} thread-pool */ public ThreadPool getThreadPool() { @@ -184,8 +192,9 @@ } /** - * Returns the {@link org.elasticsearch.index.store.Store} instance that provides access to the {@link org.apache.lucene.store.Directory} - * used for the engines {@link org.apache.lucene.index.IndexWriter} to write it's index files to. + * Returns the {@link org.elasticsearch.index.store.Store} instance that provides access to the + * {@link org.apache.lucene.store.Directory} used for the engine's {@link org.apache.lucene.index.IndexWriter} to write its index files + * to. * <p>
    * Note: In order to use this instance the consumer needs to increment the stores reference before it's used the first time and hold * it's reference until it's not needed anymore. diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index c412ce3b85f..dc0669e02b7 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -275,7 +275,7 @@ public class InternalEngine extends Engine { SearcherManager searcherManager = null; try { try { - final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter, true), shardId); + final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId); searcherManager = new SearcherManager(directoryReader, searcherFactory); lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store); success = true; @@ -743,7 +743,9 @@ public class InternalEngine extends Engine { indexWriter.forceMerge(maxNumSegments, true /* blocks and waits for merges*/); } if (flush) { - flush(true, true); + if (tryRenewSyncCommit() == false) { + flush(false, true); + } } if (upgrade) { logger.info("finished segment upgrade"); diff --git a/core/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/core/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index 747e955b179..f962d31bf8b 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/core/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -64,7 +64,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable { * * NUM_BYTES_OBJECT_HEADER + 2*NUM_BYTES_INT + NUM_BYTES_OBJECT_REF + NUM_BYTES_ARRAY_HEADER [ + bytes.length] */ private static final int BASE_BYTES_PER_BYTESREF = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + - 2*RamUsageEstimator.NUM_BYTES_INT + + 2*Integer.BYTES + RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER; @@ -76,7 +76,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable { * CHM's pointer to CHM.Entry, double for approx load factor: * + 2*NUM_BYTES_OBJECT_REF */ private static final int BASE_BYTES_PER_CHM_ENTRY = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + - RamUsageEstimator.NUM_BYTES_INT + + Integer.BYTES + 5*RamUsageEstimator.NUM_BYTES_OBJECT_REF; /** Tracks bytes used by current map, i.e. what is freed on refresh. 
For deletes, which are also added to tombstones, we only account diff --git a/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java b/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java index 950dbdbae65..6b780c2a6a3 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java +++ b/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java @@ -54,7 +54,7 @@ class VersionValue implements Accountable { @Override public long ramBytesUsed() { - return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_LONG + RamUsageEstimator.NUM_BYTES_OBJECT_REF + translogLocation.ramBytesUsed(); + return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + Long.BYTES + RamUsageEstimator.NUM_BYTES_OBJECT_REF + translogLocation.ramBytesUsed(); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java index ffa23bf56e4..172e16d8f35 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java @@ -20,10 +20,14 @@ package org.elasticsearch.index.fielddata; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparatorSource; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.SortField; import org.apache.lucene.search.Weight; @@ -122,11 +126,11 @@ public interface IndexFieldData extends IndexCompone public static class Nested { private final BitSetProducer rootFilter; - private final Weight innerFilter; + private final Query innerQuery; - public Nested(BitSetProducer rootFilter, Weight innerFilter) { + public Nested(BitSetProducer rootFilter, Query innerQuery) { this.rootFilter = rootFilter; - this.innerFilter = innerFilter; + this.innerQuery = innerQuery; } /** @@ -140,7 +144,10 @@ public interface IndexFieldData extends IndexCompone * Get a {@link DocIdSet} that matches the inner documents. */ public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException { - Scorer s = innerFilter.scorer(ctx); + final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx); + IndexSearcher indexSearcher = new IndexSearcher(topLevelCtx); + Weight weight = indexSearcher.createNormalizedWeight(innerQuery, false); + Scorer s = weight.scorer(ctx); return s == null ? 
null : s.iterator(); } } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java index 94e9edc5b94..24e49ec63e6 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java @@ -24,6 +24,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.plain.AbstractGeoPointDVIndexFieldData; @@ -67,7 +68,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo default: throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,node]"); } - }, false, Setting.Scope.INDEX); + }, Property.IndexScope); private static final IndexFieldData.Builder MISSING_DOC_VALUES_BUILDER = (indexProperties, fieldType, cache, breakerService, mapperService1) -> { throw new IllegalStateException("Can't load fielddata on [" + fieldType.name() @@ -230,13 +231,13 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo IndexFieldData.Builder builder = null; String format = type.getFormat(indexSettings.getSettings()); if (format != null && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(format) && !docValues) { - logger.warn("field [" + fieldName + "] has no doc values, will use default field data format"); + logger.warn("field [{}] has no doc values, will use default field data format", fieldName); format = null; } if (format != null) { builder = buildersByTypeAndFormat.get(Tuple.tuple(type.getType(), format)); if (builder == null) { - logger.warn("failed to find format [" + format + "] for field [" + fieldName + "], will use default"); + logger.warn("failed to find format [{}] for field [{}], will use default", format, fieldName); } } if (builder == null && docValues) { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/SortedBinaryDocValues.java b/core/src/main/java/org/elasticsearch/index/fielddata/SortedBinaryDocValues.java index 58a7c9758b7..b3c51141e20 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/SortedBinaryDocValues.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/SortedBinaryDocValues.java @@ -23,7 +23,7 @@ import org.apache.lucene.util.BytesRef; /** * A list of per-document binary values, sorted - * according to {@link BytesRef#getUTF8SortedAsUnicodeComparator()}. + * according to {@link BytesRef#compareTo(BytesRef)}. * There might be dups however. 
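The IndexFieldData.Nested change a few hunks up is worth pausing on: a Lucene Weight is produced by, and tied to, the IndexSearcher that created it, so caching a Weight and reusing it against leaves of a different (newer) reader is unsafe. Storing the Query and rebuilding the Weight from each leaf's own top-level context keeps weight.scorer(ctx) valid. A condensed restatement of the new pattern (the wrapper class is made up; the calls mirror the diff itself):

```java
import java.io.IOException;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

final class InnerDocsDemo {
    /** Builds the weight against the leaf's own top-level reader, never a cached one. */
    static DocIdSetIterator innerDocs(Query innerQuery, LeafReaderContext ctx) throws IOException {
        IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx);
        IndexSearcher searcher = new IndexSearcher(topLevelCtx);
        Weight weight = searcher.createNormalizedWeight(innerQuery, false); // scores not needed
        Scorer s = weight.scorer(ctx);
        return s == null ? null : s.iterator();
    }
}
```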
*/ public abstract class SortedBinaryDocValues { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java index bdc121b134b..2b69afa5f82 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java @@ -30,8 +30,8 @@ import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.FixedBitSet; +import org.apache.lucene.util.LegacyNumericUtils; import org.apache.lucene.util.LongsRef; -import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.packed.GrowableWriter; import org.apache.lucene.util.packed.PackedInts; import org.apache.lucene.util.packed.PagedGrowableWriter; @@ -459,7 +459,7 @@ public final class OrdinalsBuilder implements Closeable { @Override protected AcceptStatus accept(BytesRef term) throws IOException { // we stop accepting terms once we moved across the prefix codec terms - redundant values! - return NumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END; + return LegacyNumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END; } }; } @@ -475,7 +475,7 @@ public final class OrdinalsBuilder implements Closeable { @Override protected AcceptStatus accept(BytesRef term) throws IOException { // we stop accepting terms once we moved across the prefix codec terms - redundant values! - return NumericUtils.getPrefixCodedIntShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END; + return LegacyNumericUtils.getPrefixCodedIntShift(term) == 0 ? 
AcceptStatus.YES : AcceptStatus.END; } }; } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java index 022e3ad0923..2c41dece3de 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java @@ -24,7 +24,7 @@ import org.apache.lucene.spatial.util.GeoEncodingUtils; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.CharsRefBuilder; -import org.apache.lucene.util.NumericUtils; +import org.apache.lucene.util.LegacyNumericUtils; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.GeoPoint; @@ -62,7 +62,7 @@ abstract class AbstractIndexGeoPointFieldData extends AbstractIndexFieldData leaves; private final AtomicParentChildFieldData[] fielddata; - private final IndexReader reader; private final long ramBytesUsed; private final Map ordinalMapPerType; GlobalFieldData(IndexReader reader, AtomicParentChildFieldData[] fielddata, long ramBytesUsed, Map ordinalMapPerType) { - this.reader = reader; + this.coreCacheKey = reader.getCoreCacheKey(); + this.leaves = reader.leaves(); this.ramBytesUsed = ramBytesUsed; this.fielddata = fielddata; this.ordinalMapPerType = ordinalMapPerType; @@ -329,7 +331,7 @@ public class ParentChildIndexFieldData extends AbstractIndexFieldData> FieldStats get(String field) throws IOException { + MappedFieldType mappedFieldType = mapperService.fullName(field); + if (mappedFieldType != null) { + IndexReader reader = searcher.reader(); + Terms terms = MultiFields.getTerms(reader, field); + if (terms != null) { + return mappedFieldType.stats(terms, reader.maxDoc()); + } + } + return null; + } + + /** + * @param fieldName + * the fieldName to check + * @param from + * the minimum value for the query + * @param to + * the maximum value for the query + * @param includeLower + * whether the from value is inclusive + * @param includeUpper + * whether the to value is inclusive + * @param timeZone + * the timeZone to use for date fields + * @param dateMathParser + * the {@link DateMathParser} to use for date fields + * @return A {@link Relation} indicating the overlap of the range of terms + * for the field with the query range. This method will return: + *

+ *         <ul>
+ *         <li>{@link Relation#WITHIN} if the range of terms for the field
+ *         in the shard is completely within the query range</li>
+ *         <li>{@link Relation#DISJOINT} if the range of terms for the field
+ *         in the shard is completely outside the query range</li>
+ *         <li>{@link Relation#INTERSECTS} if the range of terms for the
+ *         field in the shard intersects with the query range</li>
+ *         </ul>
    + * @throws IOException + * if the index cannot be read + */ + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + MappedFieldType mappedFieldType = mapperService.fullName(fieldName); + FieldStats fieldStats = get(fieldName); + if (fieldStats == null) { + // No fieldStats for the field so the field doesn't exist on + // this shard, so relation is DISJOINT + return Relation.DISJOINT; + } else { + // Convert the from and to values to Strings so they can be used + // in the IndexConstraints. Since DateTime is represented as a + // Long field in Lucene we need to use the millisecond value of + // the DateTime in that case + String fromString = null; + if (from != null) { + if (mappedFieldType instanceof DateFieldType) { + long millis = ((DateFieldType) mappedFieldType).parseToMilliseconds(from, !includeLower, timeZone, dateMathParser); + fromString = fieldStats.stringValueOf(millis, null); + } else if (mappedFieldType instanceof IpFieldType) { + if (from instanceof BytesRef) { + from = ((BytesRef) from).utf8ToString(); + } + long ipAsLong = ((IpFieldType) mappedFieldType).value(from); + fromString = fieldStats.stringValueOf(ipAsLong, null); + } else { + fromString = fieldStats.stringValueOf(from, null); + } + } + String toString = null; + if (to != null) { + if (mappedFieldType instanceof DateFieldType) { + long millis = ((DateFieldType) mappedFieldType).parseToMilliseconds(to, includeUpper, timeZone, dateMathParser); + toString = fieldStats.stringValueOf(millis, null); + } else if (mappedFieldType instanceof IpFieldType) { + if (to instanceof BytesRef) { + to = ((BytesRef) to).utf8ToString(); + } + long ipAsLong = ((IpFieldType) mappedFieldType).value(to); + toString = fieldStats.stringValueOf(ipAsLong, null); + } else { + toString = fieldStats.stringValueOf(to, null); + } + } + if ((from == null || fieldStats + .match(new IndexConstraint(fieldName, Property.MIN, includeLower ? Comparison.GTE : Comparison.GT, fromString))) + && (to == null || fieldStats.match( + new IndexConstraint(fieldName, Property.MAX, includeUpper ? Comparison.LTE : Comparison.LT, toString)))) { + // If the min and max terms for the field are both within + // the query range then all documents will match so relation is + // WITHIN + return Relation.WITHIN; + } else if ((to != null && fieldStats + .match(new IndexConstraint(fieldName, Property.MIN, includeUpper ? Comparison.GT : Comparison.GTE, toString))) + || (from != null && fieldStats.match( + new IndexConstraint(fieldName, Property.MAX, includeLower ? Comparison.LT : Comparison.LTE, fromString)))) { + // If the min and max terms are both outside the query range + // then no document will match so relation is DISJOINT (N.B. 
+ // since from <= to we only need + // to check one bound for each side of the query range) + return Relation.DISJOINT; + } + } + // Range of terms doesn't match any of the constraints so must INTERSECT + return Relation.INTERSECTS; + } + + /** + * An enum used to describe the relation between the range of terms in a + * shard when compared with a query range + */ + public static enum Relation { + WITHIN, INTERSECTS, DISJOINT; + } +} diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 2694b64783b..cf0c0fbba33 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -19,8 +19,14 @@ package org.elasticsearch.index.mapper; +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.common.Strings; @@ -48,15 +54,8 @@ import org.elasticsearch.index.mapper.object.ArrayValueMapperParser; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.RootObjectMapper; -import java.io.Closeable; -import java.io.IOException; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - /** A parser for documents, given mappings from a DocumentMapper */ -class DocumentParser implements Closeable { +final class DocumentParser implements Closeable { private CloseableThreadLocal cache = new CloseableThreadLocal() { @Override @@ -99,7 +98,7 @@ class DocumentParser implements Closeable { reverseOrder(context); - ParsedDocument doc = parsedDocument(source, context, update(context, mapping)); + ParsedDocument doc = parsedDocument(source, context, createDynamicUpdate(mapping, docMapper, context.getDynamicMappers())); // reset the context to free up memory context.reset(null, null, null); return doc; @@ -116,10 +115,7 @@ class DocumentParser implements Closeable { // entire type is disabled parser.skipChildren(); } else if (emptyDoc == false) { - Mapper update = parseObject(context, mapping.root, true); - if (update != null) { - context.addDynamicMappingsUpdate(update); - } + parseObjectOrNested(context, mapping.root, true); } for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { @@ -201,11 +197,6 @@ class DocumentParser implements Closeable { } - private static Mapping update(ParseContext.InternalParseContext context, Mapping mapping) { - Mapper rootDynamicUpdate = context.dynamicMappingsUpdate(); - return rootDynamicUpdate != null ? mapping.mappingUpdate(rootDynamicUpdate) : null; - } - private static MapperParsingException wrapInMapperParsingException(SourceToParse source, Throwable e) { // if its already a mapper parsing exception, no need to wrap it... if (e instanceof MapperParsingException) { @@ -220,10 +211,156 @@ class DocumentParser implements Closeable { return new MapperParsingException("failed to parse", e); } - static ObjectMapper parseObject(ParseContext context, ObjectMapper mapper, boolean atRoot) throws IOException { + /** Creates a Mapping containing any dynamically added fields, or returns null if there were no dynamic mappings. 
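The relation logic in isFieldWithinQuery() above is easier to see with the FieldStats and IndexConstraint plumbing stripped away. A minimal sketch over inclusive long bounds (the names are invented; the real method additionally converts date/IP values and handles open-ended ranges):

```java
// Toy restatement of the shard/query range comparison above.
public class RangeRelationDemo {
    enum Relation { WITHIN, INTERSECTS, DISJOINT }

    // fieldMin/fieldMax: the shard's smallest and largest terms; from/to: inclusive query bounds
    static Relation relate(long fieldMin, long fieldMax, long from, long to) {
        if (from <= fieldMin && fieldMax <= to) {
            return Relation.WITHIN;    // every term is inside the query range: all docs match
        }
        if (fieldMax < from || to < fieldMin) {
            return Relation.DISJOINT;  // the ranges do not overlap: nothing can match
        }
        return Relation.INTERSECTS;    // partial overlap: the query has to run normally
    }
}
```

For example, with shard terms spanning [5, 20], a query range of [0, 50] is WITHIN, [25, 30] is DISJOINT, and [10, 30] is INTERSECTS.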
*/ + static Mapping createDynamicUpdate(Mapping mapping, DocumentMapper docMapper, List dynamicMappers) { + if (dynamicMappers.isEmpty()) { + return null; + } + // We build a mapping by first sorting the mappers, so that all mappers containing a common prefix + // will be processed in a contiguous block. When the prefix is no longer seen, we pop the extra elements + // off the stack, merging them upwards into the existing mappers. + Collections.sort(dynamicMappers, (Mapper o1, Mapper o2) -> o1.name().compareTo(o2.name())); + Iterator dynamicMapperItr = dynamicMappers.iterator(); + List parentMappers = new ArrayList<>(); + Mapper firstUpdate = dynamicMapperItr.next(); + parentMappers.add(createUpdate(mapping.root(), firstUpdate.name().split("\\."), 0, firstUpdate)); + Mapper previousMapper = null; + while (dynamicMapperItr.hasNext()) { + Mapper newMapper = dynamicMapperItr.next(); + if (previousMapper != null && newMapper.name().equals(previousMapper.name())) { + // We can see the same mapper more than once, for example, if we had foo.bar and foo.baz, where + // foo did not yet exist. This will create 2 copies in dynamic mappings, which should be identical. + // Here we just skip over the duplicates, but we merge them to ensure there are no conflicts. + newMapper.merge(previousMapper, false); + continue; + } + previousMapper = newMapper; + String[] nameParts = newMapper.name().split("\\."); + + // We first need the stack to only contain mappers in common with the previously processed mapper + // For example, if the first mapper processed was a.b.c, and we now have a.d, the stack will contain + // a.b, and we want to merge b back into the stack so it just contains a + int i = removeUncommonMappers(parentMappers, nameParts); + + // Then we need to add back mappers that may already exist within the stack, but are not on it. + // For example, if we processed a.b, followed by an object mapper a.c.d, and now are adding a.c.d.e + // then the stack will only have a on it because we will have already merged a.c.d into the stack. + // So we need to pull a.c, followed by a.c.d, onto the stack so e can be added to the end. + i = expandCommonMappers(parentMappers, nameParts, i); + + // If there are still parents of the new mapper which are not on the stack, we need to pull them + // from the existing mappings. In order to maintain the invariant that the stack only contains + // fields which are updated, we cannot simply add the existing mappers to the stack, since they + // may have other subfields which will not be updated. Instead, we pull the mapper from the existing + // mappings, and build an update with only the new mapper and its parents. This then becomes our + // "new mapper", and can be added to the stack. 
+ if (i < nameParts.length - 1) { + newMapper = createExistingMapperUpdate(parentMappers, nameParts, i, docMapper, newMapper); + } + + if (newMapper instanceof ObjectMapper) { + parentMappers.add((ObjectMapper)newMapper); + } else { + addToLastMapper(parentMappers, newMapper, true); + } + } + popMappers(parentMappers, 1, true); + assert parentMappers.size() == 1; + + return mapping.mappingUpdate(parentMappers.get(0)); + } + + private static void popMappers(List parentMappers, int keepBefore, boolean merge) { + assert keepBefore >= 1; // never remove the root mapper + // pop off parent mappers not needed by the current mapper, + // merging them backwards since they are immutable + for (int i = parentMappers.size() - 1; i >= keepBefore; --i) { + addToLastMapper(parentMappers, parentMappers.remove(i), merge); + } + } + + /** + * Adds a mapper as an update into the last mapper. If merge is true, the new mapper + * will be merged in with other child mappers of the last parent, otherwise it will be a new update. + */ + private static void addToLastMapper(List parentMappers, Mapper mapper, boolean merge) { + assert parentMappers.size() >= 1; + int lastIndex = parentMappers.size() - 1; + ObjectMapper withNewMapper = parentMappers.get(lastIndex).mappingUpdate(mapper); + if (merge) { + withNewMapper = parentMappers.get(lastIndex).merge(withNewMapper, false); + } + parentMappers.set(lastIndex, withNewMapper); + } + + /** + * Removes mappers that exist on the stack, but are not part of the path of the current nameParts, + * Returns the next unprocessed index from nameParts. + */ + private static int removeUncommonMappers(List parentMappers, String[] nameParts) { + int keepBefore = 1; + while (keepBefore < parentMappers.size() && + parentMappers.get(keepBefore).simpleName().equals(nameParts[keepBefore - 1])) { + ++keepBefore; + } + popMappers(parentMappers, keepBefore, true); + return keepBefore - 1; + } + + /** + * Adds mappers from the end of the stack that exist as updates within those mappers. + * Returns the next unprocessed index from nameParts. + */ + private static int expandCommonMappers(List parentMappers, String[] nameParts, int i) { + ObjectMapper last = parentMappers.get(parentMappers.size() - 1); + while (i < nameParts.length - 1 && last.getMapper(nameParts[i]) != null) { + Mapper newLast = last.getMapper(nameParts[i]); + assert newLast instanceof ObjectMapper; + last = (ObjectMapper) newLast; + parentMappers.add(last); + ++i; + } + return i; + } + + /** Creates an update for intermediate object mappers that are not on the stack, but parents of newMapper. */ + private static ObjectMapper createExistingMapperUpdate(List parentMappers, String[] nameParts, int i, + DocumentMapper docMapper, Mapper newMapper) { + String updateParentName = nameParts[i]; + final ObjectMapper lastParent = parentMappers.get(parentMappers.size() - 1); + if (parentMappers.size() > 1) { + // only prefix with parent mapper if the parent mapper isn't the root (which has a fake name) + updateParentName = lastParent.name() + '.' + nameParts[i]; + } + ObjectMapper updateParent = docMapper.objectMappers().get(updateParentName); + assert updateParent != null : updateParentName + " doesn't exist"; + return createUpdate(updateParent, nameParts, i + 1, newMapper); + } + + /** Build an update for the parent which will contain the given mapper and any intermediate fields. 
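The createDynamicUpdate() machinery above boils down to a sort-then-prefix-stack walk: sorting makes mappers that share a parent contiguous, and the stack only ever holds the open parents of the mapper currently being processed. A toy version over plain dotted paths (the paths and printed actions are invented; the real code pushes ObjectMapper updates and merges them on pop):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class PrefixStackWalkDemo {
    public static void main(String[] args) {
        List<String> mappers = new ArrayList<>(Arrays.asList("a.b.c", "a.d", "a.c.d.e"));
        Collections.sort(mappers); // -> [a.b.c, a.c.d.e, a.d]: shared prefixes are contiguous
        List<String> stack = new ArrayList<>(); // currently open parent segments
        for (String name : mappers) {
            String[] parts = name.split("\\.");
            int common = 0; // length of the prefix shared with the open parents
            while (common < stack.size() && common < parts.length - 1
                    && stack.get(common).equals(parts[common])) {
                common++;
            }
            while (stack.size() > common) { // pop parents we have moved past ("merge upwards")
                System.out.println("close " + stack.remove(stack.size() - 1));
            }
            for (int i = common; i < parts.length - 1; i++) { // open any new parents
                System.out.println("open  " + parts[i]);
                stack.add(parts[i]);
            }
            System.out.println("leaf  " + parts[parts.length - 1]);
        }
        while (!stack.isEmpty()) { // final pop back to the root
            System.out.println("close " + stack.remove(stack.size() - 1));
        }
    }
}
```

Running it prints open a, open b, leaf c, close b, open c, open d, leaf e, close d, close c, leaf d, close a, which mirrors what removeUncommonMappers() and popMappers() do above.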
*/ + private static ObjectMapper createUpdate(ObjectMapper parent, String[] nameParts, int i, Mapper mapper) { + List parentMappers = new ArrayList<>(); + ObjectMapper previousIntermediate = parent; + for (; i < nameParts.length - 1; ++i) { + Mapper intermediate = previousIntermediate.getMapper(nameParts[i]); + assert intermediate != null : "Field " + previousIntermediate.name() + " does not have a subfield " + nameParts[i]; + assert intermediate instanceof ObjectMapper; + parentMappers.add((ObjectMapper)intermediate); + previousIntermediate = (ObjectMapper)intermediate; + } + if (parentMappers.isEmpty() == false) { + // add the new mapper to the stack, and pop down to the original parent level + addToLastMapper(parentMappers, mapper, false); + popMappers(parentMappers, 1, false); + mapper = parentMappers.get(0); + } + return parent.mappingUpdate(mapper); + } + + static void parseObjectOrNested(ParseContext context, ObjectMapper mapper, boolean atRoot) throws IOException { if (mapper.isEnabled() == false) { context.parser().skipChildren(); - return null; + return; } XContentParser parser = context.parser(); @@ -234,7 +371,7 @@ class DocumentParser implements Closeable { XContentParser.Token token = parser.currentToken(); if (token == XContentParser.Token.VALUE_NULL) { // the object is null ("obj1" : null), simply bail - return null; + return; } if (token.isValue()) { @@ -256,21 +393,19 @@ class DocumentParser implements Closeable { } ObjectMapper update = null; - update = innerParseObject(context, mapper, parser, currentFieldName, token, update); + innerParseObject(context, mapper, parser, currentFieldName, token); // restore the enable path flag if (nested.isNested()) { nested(context, nested); } - return update; } - private static ObjectMapper innerParseObject(ParseContext context, ObjectMapper mapper, XContentParser parser, String currentFieldName, XContentParser.Token token, ObjectMapper update) throws IOException { + private static void innerParseObject(ParseContext context, ObjectMapper mapper, XContentParser parser, String currentFieldName, XContentParser.Token token) throws IOException { while (token != XContentParser.Token.END_OBJECT) { - ObjectMapper newUpdate = null; if (token == XContentParser.Token.START_OBJECT) { - newUpdate = parseObject(context, mapper, currentFieldName); + parseObject(context, mapper, currentFieldName); } else if (token == XContentParser.Token.START_ARRAY) { - newUpdate = parseArray(context, mapper, currentFieldName); + parseArray(context, mapper, currentFieldName); } else if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NULL) { @@ -278,18 +413,10 @@ class DocumentParser implements Closeable { } else if (token == null) { throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but got EOF, has a concrete value been provided to it?"); } else if (token.isValue()) { - newUpdate = parseValue(context, mapper, currentFieldName, token); + parseValue(context, mapper, currentFieldName, token); } token = parser.nextToken(); - if (newUpdate != null) { - if (update == null) { - update = newUpdate; - } else { - update = update.merge(newUpdate, false); - } - } } - return update; } private static void nested(ParseContext context, ObjectMapper.Nested nested) { @@ -335,33 +462,29 @@ class DocumentParser implements Closeable { return context; } - private static Mapper parseObjectOrField(ParseContext context, 
Mapper mapper) throws IOException { + private static void parseObjectOrField(ParseContext context, Mapper mapper) throws IOException { if (mapper instanceof ObjectMapper) { - return parseObject(context, (ObjectMapper) mapper, false); + parseObjectOrNested(context, (ObjectMapper) mapper, false); } else { FieldMapper fieldMapper = (FieldMapper)mapper; Mapper update = fieldMapper.parse(context); + if (update != null) { + context.addDynamicMapper(update); + } if (fieldMapper.copyTo() != null) { parseCopyFields(context, fieldMapper, fieldMapper.copyTo().copyToFields()); } - return update; } } private static ObjectMapper parseObject(final ParseContext context, ObjectMapper mapper, String currentFieldName) throws IOException { - if (currentFieldName == null) { - throw new MapperParsingException("object mapping [" + mapper.name() + "] trying to serialize an object with no field associated with it, current value [" + context.parser().textOrNull() + "]"); - } + assert currentFieldName != null; context.path().add(currentFieldName); ObjectMapper update = null; Mapper objectMapper = mapper.getMapper(currentFieldName); if (objectMapper != null) { - final Mapper subUpdate = parseObjectOrField(context, objectMapper); - if (subUpdate != null) { - // propagate mapping update - update = mapper.mappingUpdate(subUpdate); - } + parseObjectOrField(context, objectMapper); } else { ObjectMapper.Dynamic dynamic = mapper.dynamic(); if (dynamic == null) { @@ -382,8 +505,9 @@ class DocumentParser implements Closeable { } Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); objectMapper = builder.build(builderContext); + context.addDynamicMapper(objectMapper); context.path().add(currentFieldName); - update = mapper.mappingUpdate(parseAndMergeUpdate(objectMapper, context)); + parseObjectOrField(context, objectMapper); } else { // not dynamic, read everything up to end object context.parser().skipChildren(); @@ -394,7 +518,7 @@ class DocumentParser implements Closeable { return update; } - private static ObjectMapper parseArray(ParseContext context, ObjectMapper parentMapper, String lastFieldName) throws IOException { + private static void parseArray(ParseContext context, ObjectMapper parentMapper, String lastFieldName) throws IOException { String arrayFieldName = lastFieldName; Mapper mapper = parentMapper.getMapper(lastFieldName); if (mapper != null) { @@ -402,15 +526,9 @@ class DocumentParser implements Closeable { // expects an array, if so we pass the context straight to the mapper and if not // we serialize the array components if (mapper instanceof ArrayValueMapperParser) { - final Mapper subUpdate = parseObjectOrField(context, mapper); - if (subUpdate != null) { - // propagate the mapping update - return parentMapper.mappingUpdate(subUpdate); - } else { - return null; - } + parseObjectOrField(context, mapper); } else { - return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); } } else { @@ -423,31 +541,34 @@ class DocumentParser implements Closeable { } else if (dynamic == ObjectMapper.Dynamic.TRUE) { Mapper.Builder builder = context.root().findTemplateBuilder(context, arrayFieldName, "object"); if (builder == null) { - return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + return; } Mapper.BuilderContext builderContext = new 
Mapper.BuilderContext(context.indexSettings(), context.path()); mapper = builder.build(builderContext); - if (mapper != null && mapper instanceof ArrayValueMapperParser) { + assert mapper != null; + if (mapper instanceof ArrayValueMapperParser) { + context.addDynamicMapper(mapper); context.path().add(arrayFieldName); - mapper = parseAndMergeUpdate(mapper, context); - return parentMapper.mappingUpdate(mapper); + parseObjectOrField(context, mapper); } else { - return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); } } else { - return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + // TODO: shouldn't this skip, not parse? + parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); } } } - private static ObjectMapper parseNonDynamicArray(ParseContext context, ObjectMapper mapper, String lastFieldName, String arrayFieldName) throws IOException { + private static void parseNonDynamicArray(ParseContext context, ObjectMapper mapper, String lastFieldName, String arrayFieldName) throws IOException { XContentParser parser = context.parser(); XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.START_OBJECT) { - return parseObject(context, mapper, lastFieldName); + parseObject(context, mapper, lastFieldName); } else if (token == XContentParser.Token.START_ARRAY) { - return parseArray(context, mapper, lastFieldName); + parseArray(context, mapper, lastFieldName); } else if (token == XContentParser.Token.FIELD_NAME) { lastFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NULL) { @@ -455,25 +576,20 @@ class DocumentParser implements Closeable { } else if (token == null) { throw new MapperParsingException("object mapping for [" + mapper.name() + "] with array for [" + arrayFieldName + "] tried to parse as array, but got EOF, is there a mismatch in types for the same field?"); } else { - return parseValue(context, mapper, lastFieldName, token); + parseValue(context, mapper, lastFieldName, token); } } - return null; } - private static ObjectMapper parseValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException { + private static void parseValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException { if (currentFieldName == null) { throw new MapperParsingException("object mapping [" + parentMapper.name() + "] trying to serialize a value with no field associated with it, current value [" + context.parser().textOrNull() + "]"); } Mapper mapper = parentMapper.getMapper(currentFieldName); if (mapper != null) { - Mapper subUpdate = parseObjectOrField(context, mapper); - if (subUpdate == null) { - return null; - } - return parentMapper.mappingUpdate(subUpdate); + parseObjectOrField(context, mapper); } else { - return parseDynamicValue(context, parentMapper, currentFieldName, token); + parseDynamicValue(context, parentMapper, currentFieldName, token); } } @@ -641,7 +757,7 @@ class DocumentParser implements Closeable { throw new IllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]"); } - private static ObjectMapper parseDynamicValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, 
XContentParser.Token token) throws IOException { + private static void parseDynamicValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException { ObjectMapper.Dynamic dynamic = parentMapper.dynamic(); if (dynamic == null) { dynamic = dynamicOrDefault(context.root().dynamic()); @@ -650,7 +766,7 @@ class DocumentParser implements Closeable { throw new StrictDynamicMappingException(parentMapper.fullPath(), currentFieldName); } if (dynamic == ObjectMapper.Dynamic.FALSE) { - return null; + return; } final String path = context.path().pathAsText(currentFieldName); final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); @@ -668,14 +784,9 @@ class DocumentParser implements Closeable { // try to not introduce a conflict mapper = mapper.updateFieldType(Collections.singletonMap(path, existingFieldType)); } + context.addDynamicMapper(mapper); - mapper = parseAndMergeUpdate(mapper, context); - - ObjectMapper update = null; - if (mapper != null) { - update = parentMapper.mappingUpdate(mapper); - } - return update; + parseObjectOrField(context, mapper); } /** Creates instances of the fields that the current field should be copied to */ @@ -713,8 +824,9 @@ class DocumentParser implements Closeable { // The path of the dest field might be completely different from the current one so we need to reset it context = context.overridePath(new ContentPath(0)); - String[] paths = Strings.splitStringToArray(field, '.'); - String fieldName = paths[paths.length-1]; + // TODO: why Strings.splitStringToArray instead of String.split? + final String[] paths = Strings.splitStringToArray(field, '.'); + final String fieldName = paths[paths.length-1]; ObjectMapper mapper = context.root(); ObjectMapper[] mappers = new ObjectMapper[paths.length-1]; if (paths.length > 1) { @@ -745,6 +857,7 @@ class DocumentParser implements Closeable { if (mapper.nested() != ObjectMapper.Nested.NO) { throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to`"); } + context.addDynamicMapper(mapper); break; case FALSE: // Maybe we should log something to tell the user that the copy_to is ignored in this case. @@ -759,36 +872,10 @@ class DocumentParser implements Closeable { parent = mapper; } } - ObjectMapper update = parseDynamicValue(context, mapper, fieldName, context.parser().currentToken()); - assert update != null; // we are parsing a dynamic value so we necessarily created a new mapping - - if (paths.length > 1) { - for (int i = paths.length - 2; i >= 0; i--) { - ObjectMapper parent = context.root(); - if (i > 0) { - parent = mappers[i-1]; - } - assert parent != null; - update = parent.mappingUpdate(update); - } - } - context.addDynamicMappingsUpdate(update); + parseDynamicValue(context, mapper, fieldName, context.parser().currentToken()); } } - /** - * Parse the given {@code context} with the given {@code mapper} and apply - * the potential mapping update in-place. This method is useful when - * composing mapping updates. - */ - private static M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException { - final Mapper update = parseObjectOrField(context, mapper); - if (update != null) { - mapper = (M) mapper.merge(update, false); - } - return mapper; - } - private static ObjectMapper.Dynamic dynamicOrDefault(ObjectMapper.Dynamic dynamic) { return dynamic == null ? 
ObjectMapper.Dynamic.TRUE : dynamic; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index dba1355a395..20522abfbac 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -49,8 +50,10 @@ import java.util.Map; import java.util.stream.StreamSupport; public abstract class FieldMapper extends Mapper implements Cloneable { - public static final Setting IGNORE_MALFORMED_SETTING = Setting.boolSetting("index.mapping.ignore_malformed", false, false, Setting.Scope.INDEX); - public static final Setting COERCE_SETTING = Setting.boolSetting("index.mapping.coerce", false, false, Setting.Scope.INDEX); + public static final Setting IGNORE_MALFORMED_SETTING = + Setting.boolSetting("index.mapping.ignore_malformed", false, Property.IndexScope); + public static final Setting COERCE_SETTING = + Setting.boolSetting("index.mapping.coerce", false, Property.IndexScope); public abstract static class Builder extends Mapper.Builder { protected final MappedFieldType fieldType; @@ -200,11 +203,6 @@ public abstract class FieldMapper extends Mapper implements Cloneable { return builder; } - public T normsLoading(MappedFieldType.Loading normsLoading) { - this.fieldType.setNormsLoading(normsLoading); - return builder; - } - public T fieldDataSettings(Settings settings) { this.fieldDataSettings = settings; return builder; @@ -240,6 +238,9 @@ public abstract class FieldMapper extends Mapper implements Cloneable { protected void setupFieldType(BuilderContext context) { fieldType.setName(buildFullName(context)); + if (context.indexCreatedVersion().before(Version.V_5_0_0)) { + fieldType.setOmitNorms(fieldType.omitNorms() && fieldType.boost() == 1.0f); + } if (fieldType.indexAnalyzer() == null && fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE) { fieldType.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); fieldType.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); @@ -374,7 +375,8 @@ public abstract class FieldMapper extends Mapper implements Cloneable { // this can happen if this mapper represents a mapping update return this; } else if (fieldType.getClass() != newFieldType.getClass()) { - throw new IllegalStateException("Mixing up field types: " + fieldType.getClass() + " != " + newFieldType.getClass()); + throw new IllegalStateException("Mixing up field types: " + + fieldType.getClass() + " != " + newFieldType.getClass() + " on field " + fieldType.name()); } MultiFields updatedMultiFields = multiFields.updateFieldType(fullNameToFieldType); if (fieldType == newFieldType && multiFields == updatedMultiFields) { @@ -415,15 +417,8 @@ public abstract class FieldMapper extends Mapper implements Cloneable { if (includeDefaults || fieldType().storeTermVectors() != defaultFieldType.storeTermVectors()) { builder.field("term_vector", termVectorOptionsToString(fieldType())); } - if (includeDefaults || fieldType().omitNorms() != defaultFieldType.omitNorms() || fieldType().normsLoading() != 
null) { - builder.startObject("norms"); - if (includeDefaults || fieldType().omitNorms() != defaultFieldType.omitNorms()) { - builder.field("enabled", !fieldType().omitNorms()); - } - if (fieldType().normsLoading() != null) { - builder.field(MappedFieldType.Loading.KEY, fieldType().normsLoading()); - } - builder.endObject(); + if (includeDefaults || fieldType().omitNorms() != defaultFieldType.omitNorms()) { + builder.field("norms", fieldType().omitNorms() == false); } if (indexed && (includeDefaults || fieldType().indexOptions() != defaultFieldType.indexOptions())) { builder.field("index_options", indexOptionToString(fieldType().indexOptions())); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java index 5e9378e2f55..5f6fddf09ef 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java @@ -154,8 +154,6 @@ class FieldTypeLookup implements Iterable { for (MappedFieldType fieldType : this) { if (Regex.simpleMatch(pattern, fieldType.name())) { fields.add(fieldType.name()); - } else if (Regex.simpleMatch(pattern, fieldType.name())) { - fields.add(fieldType.name()); } } return fields; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 10b165ff4c5..98ad76f7fe1 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -103,7 +103,6 @@ public abstract class MappedFieldType extends FieldType { private NamedAnalyzer searchAnalyzer; private NamedAnalyzer searchQuoteAnalyzer; private SimilarityProvider similarity; - private Loading normsLoading; private FieldDataType fieldDataType; private Object nullValue; private String nullValueAsString; // for sending null value to _all field @@ -117,7 +116,6 @@ public abstract class MappedFieldType extends FieldType { this.searchAnalyzer = ref.searchAnalyzer(); this.searchQuoteAnalyzer = ref.searchQuoteAnalyzer(); this.similarity = ref.similarity(); - this.normsLoading = ref.normsLoading(); this.fieldDataType = ref.fieldDataType(); this.nullValue = ref.nullValue(); this.nullValueAsString = ref.nullValueAsString(); @@ -158,7 +156,6 @@ public abstract class MappedFieldType extends FieldType { Objects.equals(indexAnalyzer, fieldType.indexAnalyzer) && Objects.equals(searchAnalyzer, fieldType.searchAnalyzer) && Objects.equals(searchQuoteAnalyzer(), fieldType.searchQuoteAnalyzer()) && - Objects.equals(normsLoading, fieldType.normsLoading) && Objects.equals(fieldDataType, fieldType.fieldDataType) && Objects.equals(nullValue, fieldType.nullValue) && Objects.equals(nullValueAsString, fieldType.nullValueAsString); @@ -167,7 +164,7 @@ public abstract class MappedFieldType extends FieldType { @Override public int hashCode() { return Objects.hash(super.hashCode(), name, boost, docValues, indexAnalyzer, searchAnalyzer, searchQuoteAnalyzer, - similarity == null ? null : similarity.name(), normsLoading, fieldDataType, nullValue, nullValueAsString); + similarity == null ? 
null : similarity.name(), fieldDataType, nullValue, nullValueAsString); } // norelease: we need to override freeze() and add safety checks that all settings are actually set @@ -205,7 +202,7 @@ public abstract class MappedFieldType extends FieldType { conflicts.add("mapper [" + name() + "] has different [doc_values] values"); } if (omitNorms() && !other.omitNorms()) { - conflicts.add("mapper [" + name() + "] has different [omit_norms] values, cannot change from disable to enabled"); + conflicts.add("mapper [" + name() + "] has different [norms] values, cannot change from disable to enabled"); } if (storeTermVectors() != other.storeTermVectors()) { conflicts.add("mapper [" + name() + "] has different [store_term_vector] values"); @@ -242,9 +239,6 @@ public abstract class MappedFieldType extends FieldType { if (boost() != other.boost()) { conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [boost] across all types."); } - if (normsLoading() != other.normsLoading()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [norms.loading] across all types."); - } if (Objects.equals(searchAnalyzer(), other.searchAnalyzer()) == false) { conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [search_analyzer] across all types."); } @@ -304,15 +298,6 @@ public abstract class MappedFieldType extends FieldType { this.docValues = hasDocValues; } - public Loading normsLoading() { - return normsLoading; - } - - public void setNormsLoading(Loading normsLoading) { - checkIfFrozen(); - this.normsLoading = normsLoading; - } - public NamedAnalyzer indexAnalyzer() { return indexAnalyzer; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 4dd43db0517..6a9a402a5ff 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -76,6 +76,7 @@ public abstract class Mapper implements ToXContent, Iterable { return this.name; } + /** Returns a newly built mapper. 
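With `normsLoading` removed from `MappedFieldType`, the `norms` mapping parameter serializes as a plain boolean rather than an object with `enabled` and `loading` keys. A sketch of what a mapping built with `XContentBuilder` now emits, assuming the 5.0-era builder API with its `string()` accessor:

```java
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class NormsMappingSketch {
    public static void main(String[] args) throws Exception {
        // Emits {"properties":{"title":{"type":"text","norms":false}}};
        // the old {"norms":{"enabled":false,"loading":"lazy"}} object form is gone.
        XContentBuilder mapping = XContentFactory.jsonBuilder()
            .startObject()
                .startObject("properties")
                    .startObject("title")
                        .field("type", "text")
                        .field("norms", false)
                    .endObject()
                .endObject()
            .endObject();
        System.out.println(mapping.string());
    }
}
```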
*/ public abstract Y build(BuilderContext context); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index b25f5f6a02d..eaf897e7fbd 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -27,17 +27,18 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.indices.TypeMissingException; import org.elasticsearch.indices.mapper.MapperRegistry; -import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.script.ScriptService; import java.io.Closeable; @@ -81,9 +82,11 @@ public class MapperService extends AbstractIndexComponent implements Closeable { } public static final String DEFAULT_MAPPING = "_default_"; - public static final Setting INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, true, Setting.Scope.INDEX); + public static final Setting INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = + Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, Property.Dynamic, Property.IndexScope); public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true; - public static final Setting INDEX_MAPPER_DYNAMIC_SETTING = Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, false, Setting.Scope.INDEX); + public static final Setting INDEX_MAPPER_DYNAMIC_SETTING = + Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, Property.IndexScope); private static ObjectHashSet META_FIELDS = ObjectHashSet.from( "_uid", "_id", "_type", "_all", "_parent", "_routing", "_index", "_size", "_timestamp", "_ttl" @@ -330,7 +333,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { } private boolean typeNameStartsWithIllegalDot(DocumentMapper mapper) { - return mapper.type().startsWith(".") && !PercolatorService.TYPE_NAME.equals(mapper.type()); + return mapper.type().startsWith(".") && !PercolatorFieldMapper.TYPE_NAME.equals(mapper.type()); } private boolean assertSerialization(DocumentMapper mapper) { @@ -402,7 +405,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException { String defaultMappingSource; - if (PercolatorService.TYPE_NAME.equals(mappingType)) { + if (PercolatorFieldMapper.TYPE_NAME.equals(mappingType)) { defaultMappingSource = this.defaultPercolatorMappingSource; } else { defaultMappingSource = this.defaultMappingSource; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java 
b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java index 5bd840cf543..da1ae91c9b5 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java @@ -22,6 +22,10 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.hppc.ObjectObjectHashMap; import com.carrotsearch.hppc.ObjectObjectMap; import org.apache.lucene.document.Field; +import org.apache.lucene.document.LegacyIntField; +import org.apache.lucene.document.LegacyLongField; +import org.apache.lucene.document.LegacyFloatField; +import org.apache.lucene.document.LegacyDoubleField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; @@ -128,8 +132,8 @@ public abstract class ParseContext { * Returns an array of values of the field specified as the method parameter. * This method returns an empty array when there are no * matching fields. It never returns null. - * For {@link org.apache.lucene.document.IntField}, {@link org.apache.lucene.document.LongField}, {@link - * org.apache.lucene.document.FloatField} and {@link org.apache.lucene.document.DoubleField} it returns the string value of the number. + * For {@link org.apache.lucene.document.LegacyIntField}, {@link org.apache.lucene.document.LegacyLongField}, {@link + * org.apache.lucene.document.LegacyFloatField} and {@link org.apache.lucene.document.LegacyDoubleField} it returns the string value of the number. * If you want the actual numeric field instances back, use {@link #getFields}. * @param name the name of the field * @return a String[] of field values @@ -327,13 +331,13 @@ public abstract class ParseContext { } @Override - public void addDynamicMappingsUpdate(Mapper update) { - in.addDynamicMappingsUpdate(update); + public void addDynamicMapper(Mapper update) { + in.addDynamicMapper(update); } @Override - public Mapper dynamicMappingsUpdate() { - return in.dynamicMappingsUpdate(); + public List getDynamicMappers() { + return in.getDynamicMappers(); } } @@ -365,7 +369,7 @@ public abstract class ParseContext { private AllEntries allEntries = new AllEntries(); - private Mapper dynamicMappingsUpdate = null; + private List dynamicMappers = new ArrayList<>(); public InternalParseContext(@Nullable Settings indexSettings, DocumentMapperParser docMapperParser, DocumentMapper docMapper, ContentPath path) { this.indexSettings = indexSettings; @@ -390,7 +394,7 @@ public abstract class ParseContext { this.source = source == null ? null : sourceToParse.source(); this.path.reset(); this.allEntries = new AllEntries(); - this.dynamicMappingsUpdate = null; + this.dynamicMappers = new ArrayList<>(); } @Override @@ -532,18 +536,13 @@ public abstract class ParseContext { } @Override - public void addDynamicMappingsUpdate(Mapper mapper) { - assert mapper instanceof RootObjectMapper : mapper; - if (dynamicMappingsUpdate == null) { - dynamicMappingsUpdate = mapper; - } else { - dynamicMappingsUpdate = dynamicMappingsUpdate.merge(mapper, false); - } + public void addDynamicMapper(Mapper mapper) { + dynamicMappers.add(mapper); } @Override - public Mapper dynamicMappingsUpdate() { - return dynamicMappingsUpdate; + public List getDynamicMappers() { + return dynamicMappers; } } @@ -743,12 +742,12 @@ public abstract class ParseContext { public abstract StringBuilder stringBuilder(); /** - * Add a dynamic update to the root object mapper. + * Add a new mapper dynamically created while parsing. 
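Instead of eagerly merging each dynamic update into a single root-object mapper, the parse context now just accumulates new mappers and lets the caller consolidate them once parsing finishes. A minimal sketch of the new contract, with a stand-in `Mapper` type for illustration:

```java
import java.util.ArrayList;
import java.util.List;

class DynamicMappersSketch {
    interface Mapper {} // stand-in for org.elasticsearch.index.mapper.Mapper

    private List<Mapper> dynamicMappers = new ArrayList<>();

    // was addDynamicMappingsUpdate(Mapper): no merge() per call any more, just an append
    void addDynamicMapper(Mapper mapper) {
        dynamicMappers.add(mapper);
    }

    // was dynamicMappingsUpdate(): callers now receive every mapper created while parsing
    List<Mapper> getDynamicMappers() {
        return dynamicMappers;
    }

    // a fresh list per document, mirroring the reset in the patch
    void reset() {
        dynamicMappers = new ArrayList<>();
    }
}
```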
*/ - public abstract void addDynamicMappingsUpdate(Mapper update); + public abstract void addDynamicMapper(Mapper update); /** - * Get dynamic updates to the root object mapper. + * Get dynamic mappers created while parsing. */ - public abstract Mapper dynamicMappingsUpdate(); + public abstract List getDynamicMappers(); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java index 29081c6c913..86818a3999e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java @@ -23,11 +23,11 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Terms; -import org.apache.lucene.search.NumericRangeQuery; +import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.apache.lucene.util.NumericUtils; +import org.apache.lucene.util.LegacyNumericUtils; import org.elasticsearch.Version; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; @@ -116,7 +116,7 @@ public class ByteFieldMapper extends NumberFieldMapper { static final class ByteFieldType extends NumberFieldType { public ByteFieldType() { - super(NumericType.INT); + super(LegacyNumericType.INT); } protected ByteFieldType(ByteFieldType ref) { @@ -155,13 +155,13 @@ public class ByteFieldMapper extends NumberFieldMapper { @Override public BytesRef indexedValueForSearch(Object value) { BytesRefBuilder bytesRef = new BytesRefBuilder(); - NumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match + LegacyNumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match return bytesRef.get(); } @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { - return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(), lowerTerm == null ? null : (int)parseValue(lowerTerm), upperTerm == null ? 
null : (int)parseValue(upperTerm), includeLower, includeUpper); @@ -171,7 +171,7 @@ public class ByteFieldMapper extends NumberFieldMapper { public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { byte iValue = parseValue(value); byte iSim = fuzziness.asByte(); - return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(), iValue - iSim, iValue + iSim, true, true); @@ -179,8 +179,8 @@ public class ByteFieldMapper extends NumberFieldMapper { @Override public FieldStats stats(Terms terms, int maxDoc) throws IOException { - long minValue = NumericUtils.getMinInt(terms); - long maxValue = NumericUtils.getMaxInt(terms); + long minValue = LegacyNumericUtils.getMinInt(terms); + long maxValue = LegacyNumericUtils.getMaxInt(terms); return new FieldStats.Long( maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue ); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index be83f0175c2..724c37fcfcd 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -23,12 +23,11 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Terms; -import org.apache.lucene.search.NumericRangeQuery; +import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.apache.lucene.util.NumericUtils; -import org.apache.lucene.util.ToStringUtils; +import org.apache.lucene.util.LegacyNumericUtils; import org.elasticsearch.Version; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; @@ -243,7 +242,6 @@ public class DateFieldMapper extends NumberFieldMapper { .append(" TO ") .append((upperTerm == null) ? "*" : upperTerm.toString()) .append(includeUpper ? 
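Lucene 6 renames the trie-encoded numeric classes with a `Legacy` prefix while keeping their signatures, so the number mappers only swap class names. A sketch of the renamed range-query factory, assuming a lucene-core version that still ships the `Legacy` classes:

```java
import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query;

public class LegacyRangeQuerySketch {
    // Same arguments as the pre-rename NumericRangeQuery.newIntRange:
    // field name, precision step, lower bound, upper bound, includeLower, includeUpper.
    public static Query intRange(String field, int precisionStep, Integer lower, Integer upper) {
        return LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
    }
}
```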
']' : '}') - .append(ToStringUtils.boost(getBoost())) .toString(); } } @@ -253,7 +251,7 @@ public class DateFieldMapper extends NumberFieldMapper { protected DateMathParser dateMathParser = new DateMathParser(dateTimeFormatter); public DateFieldType() { - super(NumericType.LONG); + super(LegacyNumericType.LONG); setFieldDataType(new FieldDataType("long")); } @@ -360,7 +358,7 @@ public class DateFieldMapper extends NumberFieldMapper { @Override public BytesRef indexedValueForSearch(Object value) { BytesRefBuilder bytesRef = new BytesRefBuilder(); - NumericUtils.longToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match + LegacyNumericUtils.longToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match return bytesRef.get(); } @@ -392,7 +390,7 @@ public class DateFieldMapper extends NumberFieldMapper { // not a time format iSim = fuzziness.asLong(); } - return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(), iValue - iSim, iValue + iSim, true, true); @@ -400,8 +398,8 @@ public class DateFieldMapper extends NumberFieldMapper { @Override public FieldStats stats(Terms terms, int maxDoc) throws IOException { - long minValue = NumericUtils.getMinLong(terms); - long maxValue = NumericUtils.getMaxLong(terms); + long minValue = LegacyNumericUtils.getMinLong(terms); + long maxValue = LegacyNumericUtils.getMaxLong(terms); return new FieldStats.Date( maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue, dateTimeFormatter() ); @@ -412,17 +410,22 @@ public class DateFieldMapper extends NumberFieldMapper { } private Query innerRangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser) { - return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(), lowerTerm == null ? null : parseToMilliseconds(lowerTerm, !includeLower, timeZone, forcedDateParser == null ? dateMathParser : forcedDateParser), upperTerm == null ? null : parseToMilliseconds(upperTerm, includeUpper, timeZone, forcedDateParser == null ? 
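The `stats` implementations read the smallest and largest prefix-coded terms straight off the index; those helpers also moved to `LegacyNumericUtils`. A sketch of the renamed calls, assuming a `Terms` instance for a trie-encoded long field:

```java
import java.io.IOException;

import org.apache.lucene.index.Terms;
import org.apache.lucene.util.LegacyNumericUtils;

public class TermsStatsSketch {
    // getMinLong/getMaxLong decode the lowest and highest indexed values without
    // visiting every term; before Lucene 6 they lived on NumericUtils.
    static long[] minMax(Terms terms) throws IOException {
        return new long[] { LegacyNumericUtils.getMinLong(terms), LegacyNumericUtils.getMaxLong(terms) };
    }
}
```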
dateMathParser : forcedDateParser), includeLower, includeUpper); } public long parseToMilliseconds(Object value, boolean inclusive, @Nullable DateTimeZone zone, @Nullable DateMathParser forcedDateParser) { + if (value instanceof Long) { + return ((Long) value).longValue(); + } + DateMathParser dateParser = dateMathParser(); if (forcedDateParser != null) { dateParser = forcedDateParser; } + String strValue; if (value instanceof BytesRef) { strValue = ((BytesRef) value).utf8ToString(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index 7d33d09cd99..e7550dc1f92 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -24,10 +24,11 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Terms; -import org.apache.lucene.search.NumericRangeQuery; +import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.apache.lucene.util.LegacyNumericUtils; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.Version; import org.elasticsearch.action.fieldstats.FieldStats; @@ -49,7 +50,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import static org.apache.lucene.util.NumericUtils.doubleToSortableLong; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeDoubleValue; import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; @@ -118,7 +118,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { public static final class DoubleFieldType extends NumberFieldType { public DoubleFieldType() { - super(NumericType.DOUBLE); + super(LegacyNumericType.DOUBLE); } protected DoubleFieldType(DoubleFieldType ref) { @@ -158,13 +158,13 @@ public class DoubleFieldMapper extends NumberFieldMapper { public BytesRef indexedValueForSearch(Object value) { long longValue = NumericUtils.doubleToSortableLong(parseDoubleValue(value)); BytesRefBuilder bytesRef = new BytesRefBuilder(); - NumericUtils.longToPrefixCoded(longValue, 0, bytesRef); // 0 because of exact match + LegacyNumericUtils.longToPrefixCoded(longValue, 0, bytesRef); // 0 because of exact match return bytesRef.get(); } @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { - return NumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(), lowerTerm == null ? null : parseDoubleValue(lowerTerm), upperTerm == null ? 
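The new `instanceof Long` guard in `parseToMilliseconds` lets already-resolved epoch millisecond values skip date-math parsing entirely. A stripped-down sketch of the control flow, with the parser dependency elided:

```java
public class ParseToMillisSketch {
    // Fast path first: values that are already epoch millis are returned as-is;
    // only strings and BytesRef values go through the (elided) DateMathParser.
    static long parseToMilliseconds(Object value) {
        if (value instanceof Long) {
            return ((Long) value).longValue();
        }
        throw new UnsupportedOperationException("date-math parsing elided in this sketch");
    }

    public static void main(String[] args) {
        System.out.println(parseToMilliseconds(1456790400000L)); // no parser involved
    }
}
```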
null : parseDoubleValue(upperTerm), includeLower, includeUpper); @@ -174,7 +174,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { double iValue = parseDoubleValue(value); double iSim = fuzziness.asDouble(); - return NumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(), iValue - iSim, iValue + iSim, true, true); @@ -182,8 +182,8 @@ public class DoubleFieldMapper extends NumberFieldMapper { @Override public FieldStats stats(Terms terms, int maxDoc) throws IOException { - double minValue = NumericUtils.sortableLongToDouble(NumericUtils.getMinLong(terms)); - double maxValue = NumericUtils.sortableLongToDouble(NumericUtils.getMaxLong(terms)); + double minValue = NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMinLong(terms)); + double maxValue = NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMaxLong(terms)); return new FieldStats.Double( maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue ); @@ -284,7 +284,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { fields.add(field); } if (fieldType().hasDocValues()) { - addDocValue(context, fields, doubleToSortableLong(value)); + addDocValue(context, fields, NumericUtils.doubleToSortableLong(value)); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index 85c5b619bf1..93cf3a7cfaf 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -24,10 +24,11 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Terms; -import org.apache.lucene.search.NumericRangeQuery; +import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.apache.lucene.util.LegacyNumericUtils; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.Version; import org.elasticsearch.action.fieldstats.FieldStats; @@ -50,7 +51,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import static org.apache.lucene.util.NumericUtils.floatToSortableInt; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeFloatValue; import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; @@ -119,7 +119,7 @@ public class FloatFieldMapper extends NumberFieldMapper { static final class FloatFieldType extends NumberFieldType { public FloatFieldType() { - super(NumericType.FLOAT); + super(LegacyNumericType.FLOAT); } protected FloatFieldType(FloatFieldType ref) { @@ -159,13 +159,13 @@ public class FloatFieldMapper extends NumberFieldMapper { public BytesRef indexedValueForSearch(Object value) { int intValue = NumericUtils.floatToSortableInt(parseValue(value)); BytesRefBuilder bytesRef = new BytesRefBuilder(); - NumericUtils.intToPrefixCoded(intValue, 0, bytesRef); // 0 because of exact match + LegacyNumericUtils.intToPrefixCoded(intValue, 0, bytesRef); // 0 because of exact match return bytesRef.get(); } @Override public Query rangeQuery(Object 
lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { - return NumericRangeQuery.newFloatRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newFloatRange(name(), numericPrecisionStep(), lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? null : parseValue(upperTerm), includeLower, includeUpper); @@ -175,7 +175,7 @@ public class FloatFieldMapper extends NumberFieldMapper { public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { float iValue = parseValue(value); final float iSim = fuzziness.asFloat(); - return NumericRangeQuery.newFloatRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newFloatRange(name(), numericPrecisionStep(), iValue - iSim, iValue + iSim, true, true); @@ -183,8 +183,8 @@ public class FloatFieldMapper extends NumberFieldMapper { @Override public FieldStats stats(Terms terms, int maxDoc) throws IOException { - float minValue = NumericUtils.sortableIntToFloat(NumericUtils.getMinInt(terms)); - float maxValue = NumericUtils.sortableIntToFloat(NumericUtils.getMaxInt(terms)); + float minValue = NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMinInt(terms)); + float maxValue = NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMaxInt(terms)); return new FieldStats.Float( maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue ); @@ -296,7 +296,7 @@ public class FloatFieldMapper extends NumberFieldMapper { fields.add(field); } if (fieldType().hasDocValues()) { - addDocValue(context, fields, floatToSortableInt(value)); + addDocValue(context, fields, NumericUtils.floatToSortableInt(value)); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java index 7de62510415..fa7191cafbf 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java @@ -24,11 +24,11 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Terms; -import org.apache.lucene.search.NumericRangeQuery; +import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.apache.lucene.util.NumericUtils; +import org.apache.lucene.util.LegacyNumericUtils; import org.elasticsearch.Version; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; @@ -124,7 +124,7 @@ public class IntegerFieldMapper extends NumberFieldMapper { public static final class IntegerFieldType extends NumberFieldType { public IntegerFieldType() { - super(NumericType.INT); + super(LegacyNumericType.INT); } protected IntegerFieldType(IntegerFieldType ref) { @@ -164,13 +164,13 @@ public class IntegerFieldMapper extends NumberFieldMapper { @Override public BytesRef indexedValueForSearch(Object value) { BytesRefBuilder bytesRef = new BytesRefBuilder(); - NumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match + LegacyNumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match return bytesRef.get(); } @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, 
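Lucene 6 splits the old `NumericUtils`: the prefix-coded term helpers (`intToPrefixCoded`, `getMinLong`, and friends) move to `LegacyNumericUtils`, while the sortable-bits conversions stay behind, which is why the float and double mappers now mix both classes. A small round-trip check of the retained conversions:

```java
import org.apache.lucene.util.NumericUtils;

public class SortableBitsSketch {
    public static void main(String[] args) {
        // Doc values store doubles/floats as sortable integers; these pairs invert each other.
        double d = 3.14;
        long sortableLong = NumericUtils.doubleToSortableLong(d);
        assert NumericUtils.sortableLongToDouble(sortableLong) == d;

        float f = 2.5f;
        int sortableInt = NumericUtils.floatToSortableInt(f);
        assert NumericUtils.sortableIntToFloat(sortableInt) == f;

        System.out.println(sortableLong + " " + sortableInt);
    }
}
```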
boolean includeUpper) { - return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(), lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? null : parseValue(upperTerm), includeLower, includeUpper); @@ -180,7 +180,7 @@ public class IntegerFieldMapper extends NumberFieldMapper { public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { int iValue = parseValue(value); int iSim = fuzziness.asInt(); - return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(), iValue - iSim, iValue + iSim, true, true); @@ -188,8 +188,8 @@ public class IntegerFieldMapper extends NumberFieldMapper { @Override public FieldStats stats(Terms terms, int maxDoc) throws IOException { - long minValue = NumericUtils.getMinInt(terms); - long maxValue = NumericUtils.getMaxInt(terms); + long minValue = LegacyNumericUtils.getMinInt(terms); + long maxValue = LegacyNumericUtils.getMaxInt(terms); return new FieldStats.Long( maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue ); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java index 171bc8de794..744882e1ccd 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java @@ -85,21 +85,13 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap @Override public Builder indexOptions(IndexOptions indexOptions) { - if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) > 0) { + if (indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS) > 0) { throw new IllegalArgumentException("The [keyword] field does not support positions, got [index_options]=" - + indexOptionToString(fieldType.indexOptions())); + + indexOptionToString(indexOptions)); } return super.indexOptions(indexOptions); } - @Override - protected void setupFieldType(BuilderContext context) { - if (!omitNormsSet && fieldType.boost() != 1.0f) { - fieldType.setOmitNorms(false); - } - super.setupFieldType(context); - } - @Override public KeywordFieldMapper build(BuilderContext context) { setupFieldType(context); @@ -128,6 +120,9 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap } else if (propName.equals("ignore_above")) { builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1)); iterator.remove(); + } else if (propName.equals("norms")) { + builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode) == false); + iterator.remove(); } else if (parseMultiField(builder, name, parserContext, propName, propNode)) { iterator.remove(); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java index 0e9592fd72e..a1acf0ab58a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java @@ -24,11 +24,11 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Terms; -import 
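The `KeywordFieldMapper.Builder.indexOptions` override previously validated the builder's current `fieldType.indexOptions()` instead of the value being set, so an illegal argument could slip through while a legal one was rejected. A sketch of the corrected check, validating the incoming argument:

```java
import org.apache.lucene.index.IndexOptions;

public class KeywordIndexOptionsSketch {
    // Validate the incoming value, not whatever the builder currently holds.
    static IndexOptions checkKeywordIndexOptions(IndexOptions indexOptions) {
        if (indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS) > 0) {
            throw new IllegalArgumentException("The [keyword] field does not support positions");
        }
        return indexOptions;
    }

    public static void main(String[] args) {
        checkKeywordIndexOptions(IndexOptions.DOCS);                         // accepted
        checkKeywordIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); // throws
    }
}
```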
org.apache.lucene.search.NumericRangeQuery; +import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.apache.lucene.util.NumericUtils; +import org.apache.lucene.util.LegacyNumericUtils; import org.elasticsearch.Version; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; @@ -123,7 +123,7 @@ public class LongFieldMapper extends NumberFieldMapper { public static class LongFieldType extends NumberFieldType { public LongFieldType() { - super(NumericType.LONG); + super(LegacyNumericType.LONG); } protected LongFieldType(LongFieldType ref) { @@ -162,13 +162,13 @@ public class LongFieldMapper extends NumberFieldMapper { @Override public BytesRef indexedValueForSearch(Object value) { BytesRefBuilder bytesRef = new BytesRefBuilder(); - NumericUtils.longToPrefixCoded(parseLongValue(value), 0, bytesRef); // 0 because of exact match + LegacyNumericUtils.longToPrefixCoded(parseLongValue(value), 0, bytesRef); // 0 because of exact match return bytesRef.get(); } @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { - return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(), lowerTerm == null ? null : parseLongValue(lowerTerm), upperTerm == null ? null : parseLongValue(upperTerm), includeLower, includeUpper); @@ -178,7 +178,7 @@ public class LongFieldMapper extends NumberFieldMapper { public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { long iValue = parseLongValue(value); final long iSim = fuzziness.asLong(); - return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(), iValue - iSim, iValue + iSim, true, true); @@ -186,8 +186,8 @@ public class LongFieldMapper extends NumberFieldMapper { @Override public FieldStats stats(Terms terms, int maxDoc) throws IOException { - long minValue = NumericUtils.getMinLong(terms); - long maxValue = NumericUtils.getMaxLong(terms); + long minValue = LegacyNumericUtils.getMinLong(terms); + long maxValue = LegacyNumericUtils.getMaxLong(terms); return new FieldStats.Long( maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue ); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java index 90fb20ef827..4b4c0882508 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java @@ -20,7 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.NumericTokenStream; +import org.apache.lucene.analysis.LegacyNumericTokenStream; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; @@ -31,8 +31,10 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import 
org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -52,7 +54,9 @@ import java.util.List; * */ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll { - private static final Setting COERCE_SETTING = Setting.boolSetting("index.mapping.coerce", true, false, Setting.Scope.INDEX); // this is private since it has a different default + // this is private since it has a different default + private static final Setting COERCE_SETTING = + Setting.boolSetting("index.mapping.coerce", true, Property.IndexScope); public static class Defaults { @@ -113,7 +117,6 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM protected void setupFieldType(BuilderContext context) { super.setupFieldType(context); - fieldType.setOmitNorms(fieldType.omitNorms() && fieldType.boost() == 1.0f); int precisionStep = fieldType.numericPrecisionStep(); if (precisionStep <= 0 || precisionStep >= maxPrecisionStep()) { fieldType.setNumericPrecisionStep(Integer.MAX_VALUE); @@ -129,7 +132,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM public static abstract class NumberFieldType extends MappedFieldType { - public NumberFieldType(NumericType numericType) { + public NumberFieldType(LegacyNumericType numericType) { setTokenized(false); setOmitNorms(true); setIndexOptions(IndexOptions.DOCS); @@ -295,38 +298,38 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM // used to we can use a numeric field in a document that is then parsed twice! public abstract static class CustomNumericField extends Field { - private ThreadLocal tokenStream = new ThreadLocal() { + private ThreadLocal tokenStream = new ThreadLocal() { @Override - protected NumericTokenStream initialValue() { - return new NumericTokenStream(fieldType().numericPrecisionStep()); + protected LegacyNumericTokenStream initialValue() { + return new LegacyNumericTokenStream(fieldType().numericPrecisionStep()); } }; - private static ThreadLocal tokenStream4 = new ThreadLocal() { + private static ThreadLocal tokenStream4 = new ThreadLocal() { @Override - protected NumericTokenStream initialValue() { - return new NumericTokenStream(4); + protected LegacyNumericTokenStream initialValue() { + return new LegacyNumericTokenStream(4); } }; - private static ThreadLocal tokenStream8 = new ThreadLocal() { + private static ThreadLocal tokenStream8 = new ThreadLocal() { @Override - protected NumericTokenStream initialValue() { - return new NumericTokenStream(8); + protected LegacyNumericTokenStream initialValue() { + return new LegacyNumericTokenStream(8); } }; - private static ThreadLocal tokenStream16 = new ThreadLocal() { + private static ThreadLocal tokenStream16 = new ThreadLocal() { @Override - protected NumericTokenStream initialValue() { - return new NumericTokenStream(16); + protected LegacyNumericTokenStream initialValue() { + return new LegacyNumericTokenStream(16); } }; - private static ThreadLocal tokenStreamMax = new ThreadLocal() { + private static ThreadLocal tokenStreamMax = new ThreadLocal() { @Override - protected NumericTokenStream initialValue() { - return new NumericTokenStream(Integer.MAX_VALUE); + protected LegacyNumericTokenStream initialValue() { + return new LegacyNumericTokenStream(Integer.MAX_VALUE); } }; @@ -337,7 
+340,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM } } - protected NumericTokenStream getCachedStream() { + protected LegacyNumericTokenStream getCachedStream() { if (fieldType().numericPrecisionStep() == 4) { return tokenStream4.get(); } else if (fieldType().numericPrecisionStep() == 8) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java index 027f0b1b40b..56b1e9a78f2 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java @@ -24,11 +24,11 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Terms; -import org.apache.lucene.search.NumericRangeQuery; +import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.apache.lucene.util.NumericUtils; +import org.apache.lucene.util.LegacyNumericUtils; import org.elasticsearch.Version; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; @@ -121,7 +121,7 @@ public class ShortFieldMapper extends NumberFieldMapper { static final class ShortFieldType extends NumberFieldType { public ShortFieldType() { - super(NumericType.INT); + super(LegacyNumericType.INT); } protected ShortFieldType(ShortFieldType ref) { @@ -160,13 +160,13 @@ public class ShortFieldMapper extends NumberFieldMapper { @Override public BytesRef indexedValueForSearch(Object value) { BytesRefBuilder bytesRef = new BytesRefBuilder(); - NumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match + LegacyNumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match return bytesRef.get(); } @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { - return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(), lowerTerm == null ? null : (int)parseValue(lowerTerm), upperTerm == null ? 
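The per-precision-step `LegacyNumericTokenStream` instances are cached in `ThreadLocal`s because token streams are stateful and not thread safe, and `getCachedStream` hands back the one matching the field's precision step. A condensed sketch of the pattern, using `ThreadLocal.withInitial` in place of the anonymous subclasses above:

```java
import org.apache.lucene.analysis.LegacyNumericTokenStream;

public class CachedStreamSketch {
    // One reusable stream per thread for each common precision step, as in the patch.
    private static final ThreadLocal<LegacyNumericTokenStream> STREAM_4 =
        ThreadLocal.withInitial(() -> new LegacyNumericTokenStream(4));
    private static final ThreadLocal<LegacyNumericTokenStream> STREAM_MAX =
        ThreadLocal.withInitial(() -> new LegacyNumericTokenStream(Integer.MAX_VALUE));

    static LegacyNumericTokenStream getCachedStream(int precisionStep) {
        return precisionStep == 4 ? STREAM_4.get() : STREAM_MAX.get();
    }
}
```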
null : (int)parseValue(upperTerm), includeLower, includeUpper); @@ -176,7 +176,7 @@ public class ShortFieldMapper extends NumberFieldMapper { public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { short iValue = parseValue(value); short iSim = fuzziness.asShort(); - return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(), iValue - iSim, iValue + iSim, true, true); @@ -184,8 +184,8 @@ public class ShortFieldMapper extends NumberFieldMapper { @Override public FieldStats stats(Terms terms, int maxDoc) throws IOException { - long minValue = NumericUtils.getMinInt(terms); - long maxValue = NumericUtils.getMaxInt(terms); + long minValue = LegacyNumericUtils.getMinInt(terms); + long maxValue = LegacyNumericUtils.getMaxInt(terms); return new FieldStats.Long( maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue ); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java index eda7b7fc87f..4301a2252d8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java @@ -26,6 +26,9 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -39,9 +42,12 @@ import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.internal.AllFieldMapper; import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import static org.apache.lucene.index.IndexOptions.NONE; import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; @@ -52,6 +58,11 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc public static final String CONTENT_TYPE = "string"; private static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1; + private static final Set SUPPORTED_PARAMETERS_FOR_AUTO_UPGRADE = new HashSet<>(Arrays.asList( + "type", + // most common parameters, for which the upgrade is straightforward + "index", "store", "doc_values", "omit_norms", "norms", "fields", "copy_to")); + public static class Defaults { public static final MappedFieldType FIELD_TYPE = new StringFieldType(); @@ -130,9 +141,47 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc } public static class TypeParser implements Mapper.TypeParser { + private final DeprecationLogger deprecationLogger; + + public TypeParser() { + ESLogger logger = Loggers.getLogger(getClass()); + this.deprecationLogger = new DeprecationLogger(logger); + } + @Override public Mapper.Builder parse(String fieldName, Map node, ParserContext parserContext) throws MapperParsingException { if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) { + // Automatically upgrade simple mappings for ease of upgrade, otherwise 
fail + if (SUPPORTED_PARAMETERS_FOR_AUTO_UPGRADE.containsAll(node.keySet())) { + deprecationLogger.deprecated("The [string] field is deprecated, please use [text] or [keyword] instead on [{}]", + fieldName); + final Object index = node.remove("index"); + final boolean keyword = index != null && "analyzed".equals(index) == false; + { + // upgrade the index setting + node.put("index", "no".equals(index) == false); + } + { + // upgrade norms settings + Object norms = node.remove("norms"); + if (norms instanceof Map) { + norms = ((Map) norms).get("enabled"); + } + if (norms != null) { + node.put("norms", TypeParsers.nodeBooleanValue("norms", norms, parserContext)); + } + Object omitNorms = node.remove("omit_norms"); + if (omitNorms != null) { + node.put("norms", TypeParsers.nodeBooleanValue("omit_norms", omitNorms, parserContext) == false); + } + } + if (keyword) { + return new KeywordFieldMapper.TypeParser().parse(fieldName, node, parserContext); + } else { + return new TextFieldMapper.TypeParser().parse(fieldName, node, parserContext); + } + + } throw new IllegalArgumentException("The [string] type is removed in 5.0. You should now use either a [text] " + "or [keyword] field instead for field [" + fieldName + "]"); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java index 15fcd9220e2..c6b91292ace 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java @@ -25,7 +25,9 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -39,11 +41,14 @@ import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.index.similarity.SimilarityService; +import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; import static org.elasticsearch.common.xcontent.support.XContentMapValues.isArray; import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue; @@ -63,10 +68,18 @@ public class TypeParsers { public static final String INDEX_OPTIONS_POSITIONS = "positions"; public static final String INDEX_OPTIONS_OFFSETS = "offsets"; - private static boolean nodeBooleanValue(Object node, Mapper.TypeParser.ParserContext parserContext) { - if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) { + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TypeParsers.class)); + private static final Set BOOLEAN_STRINGS = new HashSet<>(Arrays.asList("true", "false")); + + public static boolean nodeBooleanValue(String name, Object node, Mapper.TypeParser.ParserContext parserContext) { + // Hook onto ParseFieldMatcher so that parsing becomes strict when setting index.query.parse.strict + if 
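For mappings that only use the most common parameters, the parser now rewrites an old `string` field into `text` or `keyword` on the fly instead of failing. A self-contained walk-through of the same rewrite rules applied to a plain map, as a hypothetical stand-alone version of the parser logic:

```java
import java.util.HashMap;
import java.util.Map;

public class StringUpgradeSketch {
    public static void main(String[] args) {
        Map<String, Object> node = new HashMap<>();
        node.put("index", "not_analyzed");
        node.put("omit_norms", true);

        Object index = node.remove("index");
        // anything other than "analyzed" (when present) upgrades to keyword
        boolean keyword = index != null && "analyzed".equals(index) == false;
        // the tri-state index value collapses to a boolean: only "no" means false
        node.put("index", "no".equals(index) == false);

        Object omitNorms = node.remove("omit_norms");
        if (omitNorms != null) {
            node.put("norms", ((Boolean) omitNorms) == false); // omit_norms is the inverse of norms
        }

        // -> keyword field with {index=true, norms=false} (map order may vary)
        System.out.println((keyword ? "keyword" : "text") + " field with " + node);
    }
}
```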
(parserContext.parseFieldMatcher().isStrict()) { return XContentMapValues.nodeBooleanValue(node); } else { + // TODO: remove this leniency in 6.0 + if (BOOLEAN_STRINGS.contains(node.toString()) == false) { + DEPRECATION_LOGGER.deprecated("Expected a boolean for property [{}] but got [{}]", name, node); + } return XContentMapValues.lenientNodeBooleanValue(node); } } @@ -81,13 +94,10 @@ public class TypeParsers { builder.precisionStep(nodeIntegerValue(propNode)); iterator.remove(); } else if (propName.equals("ignore_malformed")) { - builder.ignoreMalformed(nodeBooleanValue(propNode, parserContext)); + builder.ignoreMalformed(nodeBooleanValue("ignore_malformed", propNode, parserContext)); iterator.remove(); } else if (propName.equals("coerce")) { - builder.coerce(nodeBooleanValue(propNode, parserContext)); - iterator.remove(); - } else if (propName.equals("omit_norms")) { - builder.omitNorms(nodeBooleanValue(propNode, parserContext)); + builder.coerce(nodeBooleanValue("coerce", propNode, parserContext)); iterator.remove(); } else if (propName.equals("similarity")) { SimilarityProvider similarityProvider = resolveSimilarity(parserContext, name, propNode.toString()); @@ -112,16 +122,16 @@ public class TypeParsers { parseTermVector(name, propNode.toString(), builder); iterator.remove(); } else if (propName.equals("store_term_vectors")) { - builder.storeTermVectors(nodeBooleanValue(propNode, parserContext)); + builder.storeTermVectors(nodeBooleanValue("store_term_vectors", propNode, parserContext)); iterator.remove(); } else if (propName.equals("store_term_vector_offsets")) { - builder.storeTermVectorOffsets(nodeBooleanValue(propNode, parserContext)); + builder.storeTermVectorOffsets(nodeBooleanValue("store_term_vector_offsets", propNode, parserContext)); iterator.remove(); } else if (propName.equals("store_term_vector_positions")) { - builder.storeTermVectorPositions(nodeBooleanValue(propNode, parserContext)); + builder.storeTermVectorPositions(nodeBooleanValue("store_term_vector_positions", propNode, parserContext)); iterator.remove(); } else if (propName.equals("store_term_vector_payloads")) { - builder.storeTermVectorPayloads(nodeBooleanValue(propNode, parserContext)); + builder.storeTermVectorPayloads(nodeBooleanValue("store_term_vector_payloads", propNode, parserContext)); iterator.remove(); } else if (propName.equals("analyzer")) { NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString()); @@ -174,6 +184,37 @@ public class TypeParsers { } } + public static boolean parseNorms(FieldMapper.Builder builder, String propName, Object propNode, Mapper.TypeParser.ParserContext parserContext) { + if (propName.equals("norms")) { + if (propNode instanceof Map) { + final Map properties = nodeMapValue(propNode, "norms"); + for (Iterator> propsIterator = properties.entrySet().iterator(); propsIterator.hasNext();) { + Entry entry2 = propsIterator.next(); + final String propName2 = Strings.toUnderscoreCase(entry2.getKey()); + final Object propNode2 = entry2.getValue(); + if (propName2.equals("enabled")) { + builder.omitNorms(!lenientNodeBooleanValue(propNode2)); + propsIterator.remove(); + } else if (propName2.equals(Loading.KEY)) { + // ignore for bw compat + propsIterator.remove(); + } + } + DocumentMapperParser.checkNoRemainingFields(propName, properties, parserContext.indexVersionCreated()); + DEPRECATION_LOGGER.deprecated("The [norms{enabled:true/false}] way of specifying norms is deprecated, please use [norms:true/false] instead"); + } else { + 
builder.omitNorms(nodeBooleanValue("norms", propNode, parserContext) == false); + } + return true; + } else if (propName.equals("omit_norms")) { + builder.omitNorms(nodeBooleanValue("norms", propNode, parserContext)); + DEPRECATION_LOGGER.deprecated("[omit_norms] is deprecated, please use [norms] instead with the opposite boolean value"); + return true; + } else { + return false; + } + } + /** * Parse text field attributes. In addition to {@link #parseField common attributes} * this will parse analysis and term-vectors related settings. @@ -181,6 +222,14 @@ public class TypeParsers { public static void parseTextField(FieldMapper.Builder builder, String name, Map fieldNode, Mapper.TypeParser.ParserContext parserContext) { parseField(builder, name, fieldNode, parserContext); parseAnalyzersAndTermVectors(builder, name, fieldNode, parserContext); + for (Iterator> iterator = fieldNode.entrySet().iterator(); iterator.hasNext();) { + Map.Entry entry = iterator.next(); + final String propName = Strings.toUnderscoreCase(entry.getKey()); + final Object propNode = entry.getValue(); + if (parseNorms(builder, propName, propNode, parserContext)) { + iterator.remove(); + } + } } /** @@ -199,35 +248,19 @@ public class TypeParsers { builder.index(parseIndex(name, propNode.toString(), parserContext)); iterator.remove(); } else if (propName.equals(DOC_VALUES)) { - builder.docValues(nodeBooleanValue(propNode, parserContext)); + builder.docValues(nodeBooleanValue(DOC_VALUES, propNode, parserContext)); iterator.remove(); } else if (propName.equals("boost")) { builder.boost(nodeFloatValue(propNode)); iterator.remove(); - } else if (propName.equals("omit_norms")) { - builder.omitNorms(nodeBooleanValue(propNode, parserContext)); - iterator.remove(); - } else if (propName.equals("norms")) { - final Map properties = nodeMapValue(propNode, "norms"); - for (Iterator> propsIterator = properties.entrySet().iterator(); propsIterator.hasNext();) { - Entry entry2 = propsIterator.next(); - final String propName2 = Strings.toUnderscoreCase(entry2.getKey()); - final Object propNode2 = entry2.getValue(); - if (propName2.equals("enabled")) { - builder.omitNorms(!lenientNodeBooleanValue(propNode2)); - propsIterator.remove(); - } else if (propName2.equals(Loading.KEY)) { - builder.normsLoading(Loading.parse(nodeStringValue(propNode2, null), null)); - propsIterator.remove(); - } - } - DocumentMapperParser.checkNoRemainingFields(propName, properties, parserContext.indexVersionCreated()); + } else if (parserContext.indexVersionCreated().before(Version.V_5_0_0) + && parseNorms(builder, propName, propNode, parserContext)) { iterator.remove(); } else if (propName.equals("index_options")) { builder.indexOptions(nodeIndexOptionValue(propNode)); iterator.remove(); } else if (propName.equals("include_in_all")) { - builder.includeInAll(nodeBooleanValue(propNode, parserContext)); + builder.includeInAll(nodeBooleanValue("include_in_all", propNode, parserContext)); iterator.remove(); } else if (propName.equals("similarity")) { SimilarityProvider similarityProvider = resolveSimilarity(parserContext, name, propNode.toString()); @@ -243,7 +276,7 @@ public class TypeParsers { (indexVersionCreated.after(Version.V_2_0_1) && indexVersionCreated.before(Version.V_2_1_0))) { throw new MapperParsingException("copy_to in multi fields is not allowed. 
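`parseNorms` keeps accepting the old object form and `omit_norms` for backwards compatibility, but both now log deprecation warnings; only the plain boolean remains the supported spelling. A sketch of how the three spellings map onto `omitNorms`, with the deprecation logging elided:

```java
import java.util.Map;

public class NormsFormsSketch {
    // "norms": false               -> omitNorms(true)   (supported form)
    // "norms": {"enabled": false}  -> omitNorms(true)   (deprecated object form; "loading" is ignored)
    // "omit_norms": true           -> omitNorms(true)   (deprecated inverse name)
    static boolean omitNormsFor(String propName, Object value) {
        if ("norms".equals(propName)) {
            if (value instanceof Map) {
                value = ((Map<?, ?>) value).get("enabled"); // deprecated object form
            }
            return Boolean.TRUE.equals(value) == false;
        } else if ("omit_norms".equals(propName)) {
            return Boolean.TRUE.equals(value);              // deprecated inverse name
        }
        throw new IllegalArgumentException("not a norms property: " + propName);
    }
}
```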
Found the copy_to in field [" + name + "] which is within a multi field."); } else { - ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [" + name + "] which is within a multi field. This feature has been removed and the copy_to will be removed from the mapping."); + ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [{}] which is within a multi field. This feature has been removed and the copy_to will be removed from the mapping.", name); } } else { parseCopyFields(propNode, builder); @@ -353,35 +386,32 @@ public class TypeParsers { } public static boolean parseIndex(String fieldName, String index, Mapper.TypeParser.ParserContext parserContext) throws MapperParsingException { - if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) { - switch (index) { - case "true": - return true; - case "false": - return false; - default: + switch (index) { + case "true": + return true; + case "false": + return false; + case "not_analyzed": + case "analyzed": + case "no": + if (parserContext.parseFieldMatcher().isStrict() == false) { + DEPRECATION_LOGGER.deprecated("Expected a boolean for property [index] but got [{}]", index); + return "no".equals(index) == false; + } else { throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true] or [false]"); } - } else { - final String normalizedIndex = Strings.toUnderscoreCase(index); - switch (normalizedIndex) { - case "true": - case "not_analyzed": - case "analyzed": - return true; - case "false": - case "no": - return false; - default: - throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true], [false], [no], [not_analyzed] or [analyzed]"); - } + default: + throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true] or [false]"); } } public static boolean parseStore(String fieldName, String store, Mapper.TypeParser.ParserContext parserContext) throws MapperParsingException { - if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) { + if (parserContext.parseFieldMatcher().isStrict()) { return XContentMapValues.nodeBooleanValue(store); } else { + if (BOOLEAN_STRINGS.contains(store) == false) { + DEPRECATION_LOGGER.deprecated("Expected a boolean for property [store] but got [{}]", store); + } if ("no".equals(store)) { return false; } else if ("yes".equals(store)) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java index f881d206f0c..f72533d30cf 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java @@ -21,13 +21,15 @@ package org.elasticsearch.index.mapper.geo; import org.apache.lucene.document.Field; import org.apache.lucene.spatial.util.GeoHashUtils; -import org.apache.lucene.util.NumericUtils; +import org.apache.lucene.util.LegacyNumericUtils; import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.logging.DeprecationLogger; +import 
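`parseIndex` now treats the 2.x tri-state values as deprecated aliases when lenient parsing is active: `analyzed` and `not_analyzed` both meant the field was indexed, and only `no` meant it was not. A sketch of the lenient mapping; strict mode accepts nothing but `true`/`false`:

```java
public class ParseIndexSketch {
    // Lenient path only: in strict mode the legacy values throw instead.
    static boolean upgradeIndexValue(String index) {
        switch (index) {
            case "true":         return true;
            case "false":        return false;
            case "analyzed":
            case "not_analyzed": return true;  // both tri-state values indexed the field
            case "no":           return false;
            default:
                throw new IllegalArgumentException("Can't parse [index] value [" + index + "]");
        }
    }
}
```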
org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -56,13 +58,13 @@ import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; */ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements ArrayValueMapperParser { public static final String CONTENT_TYPE = "geo_point"; + protected static final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(BaseGeoPointFieldMapper.class)); public static class Names { public static final String LAT = "lat"; public static final String LAT_SUFFIX = "." + LAT; public static final String LON = "lon"; public static final String LON_SUFFIX = "." + LON; public static final String GEOHASH = "geohash"; - public static final String GEOHASH_SUFFIX = "." + GEOHASH; public static final String IGNORE_MALFORMED = "ignore_malformed"; } @@ -194,9 +196,13 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr String propName = Strings.toUnderscoreCase(entry.getKey()); Object propNode = entry.getValue(); if (propName.equals("lat_lon")) { + deprecationLogger.deprecated(CONTENT_TYPE + " lat_lon parameter is deprecated and will be removed " + + "in the next major release"); builder.enableLatLon(XContentMapValues.lenientNodeBooleanValue(propNode)); iterator.remove(); } else if (propName.equals("precision_step")) { + deprecationLogger.deprecated(CONTENT_TYPE + " precision_step parameter is deprecated and will be removed " + + "in the next major release"); builder.precisionStep(XContentMapValues.nodeIntegerValue(propNode)); iterator.remove(); } else if (propName.equals("geohash")) { @@ -483,7 +489,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr if (includeDefaults || fieldType().isLatLonEnabled() != GeoPointFieldMapper.Defaults.ENABLE_LATLON) { builder.field("lat_lon", fieldType().isLatLonEnabled()); } - if (fieldType().isLatLonEnabled() && (includeDefaults || fieldType().latFieldType().numericPrecisionStep() != NumericUtils.PRECISION_STEP_DEFAULT)) { + if (fieldType().isLatLonEnabled() && (includeDefaults || fieldType().latFieldType().numericPrecisionStep() != LegacyNumericUtils.PRECISION_STEP_DEFAULT)) { builder.field("precision_step", fieldType().latFieldType().numericPrecisionStep()); } if (includeDefaults || fieldType().isGeoHashEnabled() != Defaults.ENABLE_GEOHASH) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java index 0d84cf21812..75c082dd439 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java @@ -84,7 +84,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper { fieldType.setTokenized(false); if (context.indexCreatedVersion().before(Version.V_2_3_0)) { fieldType.setNumericPrecisionStep(GeoPointField.PRECISION_STEP); - fieldType.setNumericType(FieldType.NumericType.LONG); + fieldType.setNumericType(FieldType.LegacyNumericType.LONG); } setupFieldType(context); return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, @@ -95,7 +95,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper { public GeoPointFieldMapper build(BuilderContext context) { if 
(context.indexCreatedVersion().before(Version.V_2_3_0)) { fieldType.setNumericPrecisionStep(GeoPointField.PRECISION_STEP); - fieldType.setNumericType(FieldType.NumericType.LONG); + fieldType.setNumericType(FieldType.LegacyNumericType.LONG); } return super.build(context); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index e90fdae0c47..57778fa8d25 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.index.mapper.geo; -import com.spatial4j.core.shape.Point; -import com.spatial4j.core.shape.Shape; -import com.spatial4j.core.shape.jts.JtsGeometry; +import org.locationtech.spatial4j.shape.Point; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.spatial.prefix.PrefixTreeStrategy; @@ -58,7 +58,7 @@ import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenien /** - * FieldMapper for indexing {@link com.spatial4j.core.shape.Shape}s. + * FieldMapper for indexing {@link org.locationtech.spatial4j.shape.Shape}s. *

    * Currently Shapes can only be indexed and can only be queried using * {@link org.elasticsearch.index.query.GeoShapeQueryParser}, consequently diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java index 97c2fa3933b..7565243251c 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java @@ -305,7 +305,7 @@ public class AllFieldMapper extends MetadataFieldMapper { builder.field("store_term_vector_payloads", fieldType().storeTermVectorPayloads()); } if (includeDefaults || fieldType().omitNorms() != Defaults.FIELD_TYPE.omitNorms()) { - builder.field("omit_norms", fieldType().omitNorms()); + builder.field("norms", !fieldType().omitNorms()); } doXContentAnalyzers(builder, includeDefaults); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index 18929bfd833..2ffb5d4ecf5 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -19,15 +19,17 @@ package org.elasticsearch.index.mapper.ip; -import org.apache.lucene.analysis.NumericTokenStream; +import org.apache.lucene.analysis.LegacyNumericTokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.search.NumericRangeQuery; +import org.apache.lucene.index.Terms; +import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.apache.lucene.util.NumericUtils; +import org.apache.lucene.util.LegacyNumericUtils; import org.elasticsearch.Version; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; @@ -206,7 +208,7 @@ public class IpFieldMapper extends NumberFieldMapper { @Override public BytesRef indexedValueForSearch(Object value) { BytesRefBuilder bytesRef = new BytesRefBuilder(); - NumericUtils.longToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match + LegacyNumericUtils.longToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match return bytesRef.get(); } @@ -242,7 +244,7 @@ public class IpFieldMapper extends NumberFieldMapper { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { - return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(), lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? 
null : parseValue(upperTerm), includeLower, includeUpper); @@ -257,11 +259,18 @@ public class IpFieldMapper extends NumberFieldMapper { } catch (IllegalArgumentException e) { iSim = fuzziness.asLong(); } - return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(), + return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(), iValue - iSim, iValue + iSim, true, true); } + + @Override + public FieldStats stats(Terms terms, int maxDoc) throws IOException { + long minValue = LegacyNumericUtils.getMinLong(terms); + long maxValue = LegacyNumericUtils.getMaxLong(terms); + return new FieldStats.Ip(maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue); + } } protected IpFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, @@ -356,11 +365,11 @@ public class IpFieldMapper extends NumberFieldMapper { public static class NumericIpTokenizer extends NumericTokenizer { public NumericIpTokenizer(int precisionStep, char[] buffer) throws IOException { - super(new NumericTokenStream(precisionStep), buffer, null); + super(new LegacyNumericTokenStream(precisionStep), buffer, null); } @Override - protected void setValue(NumericTokenStream tokenStream, String value) { + protected void setValue(LegacyNumericTokenStream tokenStream, String value) { tokenStream.setLongValue(ipToLong(value)); } } diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolateStats.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolateStats.java deleted file mode 100644 index f4c899dff9a..00000000000 --- a/core/src/main/java/org/elasticsearch/index/percolator/PercolateStats.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.percolator; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentBuilderString; - -import java.io.IOException; - -/** - * Exposes percolator related statistics. - */ -public class PercolateStats implements Streamable, ToXContent { - - private long percolateCount; - private long percolateTimeInMillis; - private long current; - private long memorySizeInBytes = -1; - private long numQueries; - - /** - * Noop constructor for serialization purposes. 
- */ - public PercolateStats() { - } - - PercolateStats(long percolateCount, long percolateTimeInMillis, long current, long memorySizeInBytes, long numQueries) { - this.percolateCount = percolateCount; - this.percolateTimeInMillis = percolateTimeInMillis; - this.current = current; - this.memorySizeInBytes = memorySizeInBytes; - this.numQueries = numQueries; - } - - /** - * @return The number of times the percolate api has been invoked. - */ - public long getCount() { - return percolateCount; - } - - /** - * @return The total amount of time spend in the percolate api - */ - public long getTimeInMillis() { - return percolateTimeInMillis; - } - - /** - * @return The total amount of time spend in the percolate api - */ - public TimeValue getTime() { - return new TimeValue(getTimeInMillis()); - } - - /** - * @return The total amount of active percolate api invocations. - */ - public long getCurrent() { - return current; - } - - /** - * @return The total number of loaded percolate queries. - */ - public long getNumQueries() { - return numQueries; - } - - /** - * @return Temporarily returns -1, but this used to return the total size the loaded queries take in - * memory, but this is disabled now because the size estimation was too expensive cpu wise. This will be enabled - * again when a cheaper size estimation can be found. - */ - public long getMemorySizeInBytes() { - return memorySizeInBytes; - } - - /** - * @return The total size the loaded queries take in memory. - */ - public ByteSizeValue getMemorySize() { - return new ByteSizeValue(memorySizeInBytes); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields.PERCOLATE); - builder.field(Fields.TOTAL, percolateCount); - builder.timeValueField(Fields.TIME_IN_MILLIS, Fields.TIME, percolateTimeInMillis); - builder.field(Fields.CURRENT, current); - builder.field(Fields.MEMORY_SIZE_IN_BYTES, memorySizeInBytes); - builder.field(Fields.MEMORY_SIZE, getMemorySize()); - builder.field(Fields.QUERIES, getNumQueries()); - builder.endObject(); - return builder; - } - - public void add(PercolateStats percolate) { - if (percolate == null) { - return; - } - - percolateCount += percolate.getCount(); - percolateTimeInMillis += percolate.getTimeInMillis(); - current += percolate.getCurrent(); - numQueries += percolate.getNumQueries(); - } - - static final class Fields { - static final XContentBuilderString PERCOLATE = new XContentBuilderString("percolate"); - static final XContentBuilderString TOTAL = new XContentBuilderString("total"); - static final XContentBuilderString TIME = new XContentBuilderString("time"); - static final XContentBuilderString TIME_IN_MILLIS = new XContentBuilderString("time_in_millis"); - static final XContentBuilderString CURRENT = new XContentBuilderString("current"); - static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes"); - static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size"); - static final XContentBuilderString QUERIES = new XContentBuilderString("queries"); - } - - public static PercolateStats readPercolateStats(StreamInput in) throws IOException { - PercolateStats stats = new PercolateStats(); - stats.readFrom(in); - return stats; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - percolateCount = in.readVLong(); - percolateTimeInMillis = in.readVLong(); - current = in.readVLong(); - numQueries = in.readVLong(); - } - - 
@Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(percolateCount); - out.writeVLong(percolateTimeInMillis); - out.writeVLong(current); - out.writeVLong(numQueries); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java index f44d454655e..338de5c333d 100644 --- a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java @@ -19,34 +19,50 @@ package org.elasticsearch.index.percolator; import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Query; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.core.BinaryFieldMapper; import org.elasticsearch.index.mapper.core.KeywordFieldMapper; +import org.elasticsearch.index.mapper.core.StringFieldMapper; +import org.elasticsearch.index.query.ParsedQuery; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; public class PercolatorFieldMapper extends FieldMapper { + public static final String TYPE_NAME = ".percolator"; public static final String NAME = "query"; public static final String CONTENT_TYPE = "percolator"; public static final PercolatorFieldType FIELD_TYPE = new PercolatorFieldType(); private static final String EXTRACTED_TERMS_FIELD_NAME = "extracted_terms"; private static final String UNKNOWN_QUERY_FIELD_NAME = "unknown_query"; + private static final String QUERY_BUILDER_FIELD_NAME = "query_builder_field"; + public static final String EXTRACTED_TERMS_FULL_FIELD_NAME = NAME + "." + EXTRACTED_TERMS_FIELD_NAME; public static final String UNKNOWN_QUERY_FULL_FIELD_NAME = NAME + "." + UNKNOWN_QUERY_FIELD_NAME; + public static final String QUERY_BUILDER_FULL_FIELD_NAME = NAME + "." 
+ QUERY_BUILDER_FIELD_NAME; public static class Builder extends FieldMapper.Builder { @@ -60,19 +76,29 @@ public class PercolatorFieldMapper extends FieldMapper { @Override public PercolatorFieldMapper build(BuilderContext context) { context.path().add(name); - KeywordFieldMapper extractedTermsField = createStringFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context); - KeywordFieldMapper unknownQueryField = createStringFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context); + KeywordFieldMapper extractedTermsField = createExtractQueryFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context); + KeywordFieldMapper unknownQueryField = createExtractQueryFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context); + BinaryFieldMapper queryBuilderField = createQueryBuilderFieldBuilder().build(context); context.path().remove(); - return new PercolatorFieldMapper(name(), fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo, queryShardContext, extractedTermsField, unknownQueryField); + return new PercolatorFieldMapper(name(), fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo, queryShardContext, extractedTermsField, unknownQueryField, queryBuilderField); } - static KeywordFieldMapper.Builder createStringFieldBuilder(String name) { + static KeywordFieldMapper.Builder createExtractQueryFieldBuilder(String name) { KeywordFieldMapper.Builder queryMetaDataFieldBuilder = new KeywordFieldMapper.Builder(name); queryMetaDataFieldBuilder.docValues(false); queryMetaDataFieldBuilder.store(false); queryMetaDataFieldBuilder.indexOptions(IndexOptions.DOCS); return queryMetaDataFieldBuilder; } + + static BinaryFieldMapper.Builder createQueryBuilderFieldBuilder() { + BinaryFieldMapper.Builder builder = new BinaryFieldMapper.Builder(QUERY_BUILDER_FIELD_NAME); + builder.docValues(true); + builder.indexOptions(IndexOptions.NONE); + builder.store(false); + builder.fieldType().setDocValuesType(DocValuesType.BINARY); + return builder; + } } public static class TypeParser implements FieldMapper.TypeParser { @@ -111,26 +137,81 @@ public class PercolatorFieldMapper extends FieldMapper { private final QueryShardContext queryShardContext; private final KeywordFieldMapper queryTermsField; private final KeywordFieldMapper unknownQueryField; + private final BinaryFieldMapper queryBuilderField; - public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext, KeywordFieldMapper queryTermsField, KeywordFieldMapper unknownQueryField) { + public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, + Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext, + KeywordFieldMapper queryTermsField, KeywordFieldMapper unknownQueryField, + BinaryFieldMapper queryBuilderField) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); this.queryShardContext = queryShardContext; this.queryTermsField = queryTermsField; this.unknownQueryField = unknownQueryField; - this.mapUnmappedFieldAsString = PercolatorQueriesRegistry.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings); + this.queryBuilderField = queryBuilderField; + this.mapUnmappedFieldAsString = PercolatorQueryCache.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings); } @Override public Mapper parse(ParseContext 
context) throws IOException { QueryShardContext queryShardContext = new QueryShardContext(this.queryShardContext); - Query query = PercolatorQueriesRegistry.parseQuery(queryShardContext, mapUnmappedFieldAsString, context.parser()); + QueryBuilder queryBuilder = parseQueryBuilder(queryShardContext.parseContext(), context.parser()); + // Fetching of terms, shapes and indexed scripts happens during this rewrite: + queryBuilder = queryBuilder.rewrite(queryShardContext); + + try (XContentBuilder builder = XContentFactory.contentBuilder(PercolatorQueryCache.QUERY_BUILDER_CONTENT_TYPE)) { + queryBuilder.toXContent(builder, new MapParams(Collections.emptyMap())); + builder.flush(); + byte[] queryBuilderAsBytes = builder.bytes().toBytes(); + context.doc().add(new Field(queryBuilderField.name(), queryBuilderAsBytes, queryBuilderField.fieldType())); + } + + Query query = toQuery(queryShardContext, mapUnmappedFieldAsString, queryBuilder); ExtractQueryTermsService.extractQueryTerms(query, context.doc(), queryTermsField.name(), unknownQueryField.name(), queryTermsField.fieldType()); return null; } + public static Query parseQuery(QueryShardContext context, boolean mapUnmappedFieldsAsString, XContentParser parser) throws IOException { + return toQuery(context, mapUnmappedFieldsAsString, parseQueryBuilder(context.parseContext(), parser)); + } + + static Query toQuery(QueryShardContext context, boolean mapUnmappedFieldsAsString, QueryBuilder queryBuilder) throws IOException { + context.reset(); + // This means that fields in the query need to exist in the mapping prior to registering this query + // The reason this is required is that if a field doesn't exist then the query assumes defaults, which may be undesired. + // + // Even worse, if fields mentioned in percolator queries are only added to the mapping after the queries have been registered, + // then the percolator queries no longer work as expected. + // + // Query parsing can't introduce new fields in mappings (which happens when registering a percolator query), + // because a field type can't be inferred from queries (as it can from documents), so the best option here is to disallow + // the usage of unmapped fields in percolator queries to avoid unexpected behaviour + // + // If index.percolator.map_unmapped_fields_as_string is set to true, the query may contain unmapped fields, which will be mapped + // as an analyzed string.
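
The parse() method above now persists the rewritten query builder itself, not just its extracted terms: it serializes the QueryBuilder to SMILE and stores the bytes in a binary doc-values field, which PercolatorQueryCache later reads back and compiles per segment. A minimal sketch of that round trip, using only calls that appear in this diff (the method wrapper and variable names are illustrative, not code from the patch):

    static Query roundTrip(QueryBuilder queryBuilder, QueryShardContext queryShardContext) throws IOException {
        // Index time (cf. PercolatorFieldMapper.parse): QueryBuilder -> SMILE bytes
        byte[] queryBuilderAsBytes;
        try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE)) {
            queryBuilder.toXContent(builder, new ToXContent.MapParams(Collections.emptyMap()));
            builder.flush();
            // these bytes end up in the "query.query_builder_field" binary doc values
            queryBuilderAsBytes = builder.bytes().toBytes();
        }

        // Load time (cf. PercolatorQueryCache.parseQueryBuilder): bytes -> QueryBuilder -> Query
        try (XContentParser parser = XContentType.SMILE.xContent()
                .createParser(queryBuilderAsBytes, 0, queryBuilderAsBytes.length)) {
            QueryParseContext parseContext = queryShardContext.parseContext();
            parseContext.reset(parser);
            QueryBuilder rebuilt = parseContext.parseInnerQueryBuilder();
            return rebuilt.toQuery(queryShardContext);   // compiled once, at warm time
        }
    }
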
+ context.setAllowUnmappedFields(false); + context.setMapUnmappedFieldAsString(mapUnmappedFieldsAsString); + context.parseFieldMatcher(context.getIndexSettings().getParseFieldMatcher()); + try { + return queryBuilder.toQuery(context); + } finally { + context.reset(); + } + } + + static QueryBuilder parseQueryBuilder(QueryParseContext context, XContentParser parser) { + context.reset(parser); + try { + return context.parseInnerQueryBuilder(); + } catch (IOException e) { + throw new ParsingException(parser.getTokenLocation(), "Failed to parse", e); + } finally { + context.reset(null); + } + } + @Override public Iterator iterator() { - return Arrays.asList(queryTermsField, unknownQueryField).iterator(); + return Arrays.asList(queryTermsField, unknownQueryField, queryBuilderField).iterator(); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorHighlightSubFetchPhase.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorHighlightSubFetchPhase.java new file mode 100644 index 00000000000..c1f9720b53b --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorHighlightSubFetchPhase.java @@ -0,0 +1,141 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.percolator; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.ReaderUtil; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.text.Text; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; +import org.elasticsearch.index.percolator.PercolatorQueryCache; +import org.elasticsearch.index.query.ParsedQuery; +import org.elasticsearch.index.query.PercolatorQuery; +import org.elasticsearch.search.SearchParseElement; +import org.elasticsearch.search.fetch.FetchSubPhase; +import org.elasticsearch.search.highlight.HighlightPhase; +import org.elasticsearch.search.highlight.SearchContextHighlight; +import org.elasticsearch.search.internal.InternalSearchHit; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.SubSearchContext; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +// Highlighting in the case of the percolator query is a bit different, because the PercolatorQuery itself doesn't get highlighted, +// but the source of the PercolatorQuery gets highlighted by each hit with type '.percolator' (percolator queries). 
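
One subtlety the class comment above glosses over: the PercolatorQuery is rarely the top-level query of the search request, since callers can wrap it in boolean, constant_score or boost queries. A hypothetical wrapping that the recursive locatePercolatorQuery(...) in the class below still resolves (demo method is illustrative only):

    static void demo(PercolatorQuery percolatorQuery) {
        // the search request might deliver the percolator query wrapped like this
        Query wrapped = new BoostQuery(new ConstantScoreQuery(percolatorQuery), 2.0f);
        // locatePercolatorQuery recurses through BooleanQuery clauses and through
        // ConstantScoreQuery/BoostQuery wrappers, so the inner query is still found
        assert PercolatorHighlightSubFetchPhase.locatePercolatorQuery(wrapped) == percolatorQuery;
    }
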
+public class PercolatorHighlightSubFetchPhase implements FetchSubPhase { + + private final HighlightPhase highlightPhase; + + @Inject + public PercolatorHighlightSubFetchPhase(HighlightPhase highlightPhase) { + this.highlightPhase = highlightPhase; + } + + @Override + public boolean hitsExecutionNeeded(SearchContext context) { + return context.highlight() != null && locatePercolatorQuery(context.query()) != null; + } + + @Override + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { + PercolatorQuery percolatorQuery = locatePercolatorQuery(context.query()); + if (percolatorQuery == null) { + // shouldn't happen as we checked for the existence of a percolator query in hitsExecutionNeeded(...) + throw new IllegalStateException("couldn't locate percolator query"); + } + + List ctxs = context.searcher().getIndexReader().leaves(); + PercolatorQueryCache queriesRegistry = context.percolatorQueryCache(); + IndexSearcher percolatorIndexSearcher = percolatorQuery.getPercolatorIndexSearcher(); + + LeafReaderContext percolatorLeafReaderContext = percolatorIndexSearcher.getIndexReader().leaves().get(0); + FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext(); + SubSearchContext subSearchContext = + createSubSearchContext(context, percolatorLeafReaderContext, percolatorQuery.getDocumentSource()); + + for (InternalSearchHit hit : hits) { + if (PercolatorFieldMapper.TYPE_NAME.equals(hit.getType())) { + LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(hit.docId(), ctxs)); + Query query = queriesRegistry.getQueries(ctx).getQuery(hit.docId() - ctx.docBase); + subSearchContext.parsedQuery(new ParsedQuery(query)); + hitContext.reset( + new InternalSearchHit(0, "unknown", new Text(percolatorQuery.getDocumentType()), Collections.emptyMap()), + percolatorLeafReaderContext, 0, percolatorIndexSearcher + ); + hitContext.cache().clear(); + highlightPhase.hitExecute(subSearchContext, hitContext); + hit.highlightFields().putAll(hitContext.hit().getHighlightFields()); + } + } + + } + + @Override + public Map parseElements() { + return Collections.emptyMap(); + } + + @Override + public boolean hitExecutionNeeded(SearchContext context) { + return false; + } + + @Override + public void hitExecute(SearchContext context, HitContext hitContext) { + } + + static PercolatorQuery locatePercolatorQuery(Query query) { + if (query instanceof PercolatorQuery) { + return (PercolatorQuery) query; + } else if (query instanceof BooleanQuery) { + for (BooleanClause clause : ((BooleanQuery) query).clauses()) { + PercolatorQuery result = locatePercolatorQuery(clause.getQuery()); + if (result != null) { + return result; + } + } + } else if (query instanceof ConstantScoreQuery) { + return locatePercolatorQuery(((ConstantScoreQuery) query).getQuery()); + } else if (query instanceof BoostQuery) { + return locatePercolatorQuery(((BoostQuery) query).getQuery()); + } + + return null; + } + + private SubSearchContext createSubSearchContext(SearchContext context, LeafReaderContext leafReaderContext, BytesReference source) { + SubSearchContext subSearchContext = new SubSearchContext(context); + subSearchContext.highlight(new SearchContextHighlight(context.highlight().fields())); + // Enforce highlighting by source, because MemoryIndex doesn't support stored fields. 
+ subSearchContext.highlight().globalForceSource(true); + subSearchContext.lookup().source().setSegmentAndDocument(leafReaderContext, 0); + subSearchContext.lookup().source().setSource(source); + return subSearchContext; + } +} diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java deleted file mode 100644 index 67ba0aaf1d2..00000000000 --- a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.percolator; - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.metrics.CounterMetric; -import org.elasticsearch.common.metrics.MeanMetric; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.internal.TypeFieldMapper; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.shard.AbstractIndexShardComponent; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.percolator.PercolatorService; - -import java.io.Closeable; -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; - -/** - * Each shard will have a percolator registry even if there isn't a {@link PercolatorService#TYPE_NAME} document type in the index. - * For shards with indices that have no {@link PercolatorService#TYPE_NAME} document type, this will hold no percolate queries. - *

    - * Once a document type has been created, the real-time percolator will start to listen to write events and update the - * this registry with queries in real time. - */ -public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent implements Closeable { - - public final static Setting INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING = Setting.boolSetting("index.percolator.map_unmapped_fields_as_string", false, false, Setting.Scope.INDEX); - - private final ConcurrentMap percolateQueries = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); - private final QueryShardContext queryShardContext; - private boolean mapUnmappedFieldsAsString; - private final MeanMetric percolateMetric = new MeanMetric(); - private final CounterMetric currentMetric = new CounterMetric(); - private final CounterMetric numberOfQueries = new CounterMetric(); - - public PercolatorQueriesRegistry(ShardId shardId, IndexSettings indexSettings, QueryShardContext queryShardContext) { - super(shardId, indexSettings); - this.queryShardContext = queryShardContext; - this.mapUnmappedFieldsAsString = indexSettings.getValue(INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING); - } - - public ConcurrentMap getPercolateQueries() { - return percolateQueries; - } - - @Override - public void close() { - clear(); - } - - public void clear() { - percolateQueries.clear(); - } - - - public void addPercolateQuery(String idAsString, BytesReference source) { - Query newquery = parsePercolatorDocument(idAsString, source); - BytesRef id = new BytesRef(idAsString); - percolateQueries.put(id, newquery); - numberOfQueries.inc(); - - } - - public void removePercolateQuery(String idAsString) { - BytesRef id = new BytesRef(idAsString); - Query query = percolateQueries.remove(id); - if (query != null) { - numberOfQueries.dec(); - } - } - - public Query parsePercolatorDocument(String id, BytesReference source) { - try (XContentParser sourceParser = XContentHelper.createParser(source)) { - String currentFieldName = null; - XContentParser.Token token = sourceParser.nextToken(); // move the START_OBJECT - if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchException("failed to parse query [" + id + "], not starting with OBJECT"); - } - while ((token = sourceParser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = sourceParser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if ("query".equals(currentFieldName)) { - return parseQuery(queryShardContext, mapUnmappedFieldsAsString, sourceParser); - } else { - sourceParser.skipChildren(); - } - } else if (token == XContentParser.Token.START_ARRAY) { - sourceParser.skipChildren(); - } - } - } catch (Exception e) { - throw new PercolatorException(shardId().getIndex(), "failed to parse query [" + id + "]", e); - } - return null; - } - - public static Query parseQuery(QueryShardContext queryShardContext, boolean mapUnmappedFieldsAsString, XContentParser parser) { - QueryShardContext context = new QueryShardContext(queryShardContext); - try { - context.reset(parser); - // This means that fields in the query need to exist in the mapping prior to registering this query - // The reason that this is required, is that if a field doesn't exist then the query assumes defaults, which may be undesired. 
- // - // Even worse when fields mentioned in percolator queries do go added to map after the queries have been registered - // then the percolator queries don't work as expected any more. - // - // Query parsing can't introduce new fields in mappings (which happens when registering a percolator query), - // because field type can't be inferred from queries (like document do) so the best option here is to disallow - // the usage of unmapped fields in percolator queries to avoid unexpected behaviour - // - // if index.percolator.map_unmapped_fields_as_string is set to true, query can contain unmapped fields which will be mapped - // as an analyzed string. - context.setAllowUnmappedFields(false); - context.setMapUnmappedFieldAsString(mapUnmappedFieldsAsString); - return context.parseInnerQuery(); - } catch (IOException e) { - throw new ParsingException(parser.getTokenLocation(), "Failed to parse", e); - } finally { - context.reset(null); - } - } - - public void loadQueries(IndexReader reader) { - logger.trace("loading percolator queries..."); - final int loadedQueries; - try { - Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME)); - QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger); - IndexSearcher indexSearcher = new IndexSearcher(reader); - indexSearcher.setQueryCache(null); - indexSearcher.search(query, queryCollector); - Map queries = queryCollector.queries(); - for (Map.Entry entry : queries.entrySet()) { - percolateQueries.put(entry.getKey(), entry.getValue()); - numberOfQueries.inc(); - } - loadedQueries = queries.size(); - } catch (Exception e) { - throw new PercolatorException(shardId.getIndex(), "failed to load queries from percolator index", e); - } - logger.debug("done loading [{}] percolator queries", loadedQueries); - } - - public boolean isPercolatorQuery(Engine.Index operation) { - if (PercolatorService.TYPE_NAME.equals(operation.type())) { - parsePercolatorDocument(operation.id(), operation.source()); - return true; - } - return false; - } - - public boolean isPercolatorQuery(Engine.Delete operation) { - return PercolatorService.TYPE_NAME.equals(operation.type()); - } - - public synchronized void updatePercolateQuery(Engine engine, String id) { - // this can be called out of order as long as for every change to a percolator document it's invoked. This will always - // fetch the latest change but might fetch the same change twice if updates / deletes happen concurrently. - try (Engine.GetResult getResult = engine.get(new Engine.Get(true, new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(PercolatorService.TYPE_NAME, id))))) { - if (getResult.exists()) { - addPercolateQuery(id, getResult.source().source); - } else { - removePercolateQuery(id); - } - } - } - - public void prePercolate() { - currentMetric.inc(); - } - - public void postPercolate(long tookInNanos) { - currentMetric.dec(); - percolateMetric.inc(tookInNanos); - } - - /** - * @return The current metrics - */ - public PercolateStats stats() { - return new PercolateStats(percolateMetric.count(), TimeUnit.NANOSECONDS.toMillis(percolateMetric.sum()), currentMetric.count(), -1, numberOfQueries.count()); - } - - // Enable when a more efficient manner is found for estimating the size of a Lucene query. 
- /*private static long computeSizeInMemory(HashedBytesRef id, Query query) { - long size = (3 * RamUsageEstimator.NUM_BYTES_INT) + RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + id.bytes.bytes.length; - size += RamEstimator.sizeOf(query); - return size; - } - - private static final class RamEstimator { - // we move this into it's own class to exclude it from the forbidden API checks - // it's fine to use here! - static long sizeOf(Query query) { - return RamUsageEstimator.sizeOf(query); - } - }*/ -} diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueryCache.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueryCache.java new file mode 100644 index 00000000000..7c9602b4909 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueryCache.java @@ -0,0 +1,266 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.percolator; + +import com.carrotsearch.hppc.IntObjectHashMap; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.lucene.index.ElasticsearchLeafReader; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexWarmer; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.internal.SourceFieldMapper; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.index.query.PercolatorQuery; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.shard.IndexShard; +import 
org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardUtils; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.Closeable; +import java.io.IOException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; +import java.util.function.Supplier; + +public final class PercolatorQueryCache extends AbstractIndexComponent + implements Closeable, LeafReader.CoreClosedListener, PercolatorQuery.QueryRegistry { + + public final static Setting INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING = + Setting.boolSetting("index.percolator.map_unmapped_fields_as_string", false, Setting.Property.IndexScope); + + public final static XContentType QUERY_BUILDER_CONTENT_TYPE = XContentType.SMILE; + + private final Supplier queryShardContextSupplier; + private final Cache cache; + private final boolean mapUnmappedFieldsAsString; + + public PercolatorQueryCache(IndexSettings indexSettings, Supplier queryShardContextSupplier) { + super(indexSettings); + this.queryShardContextSupplier = queryShardContextSupplier; + cache = CacheBuilder.builder().build(); + this.mapUnmappedFieldsAsString = indexSettings.getValue(INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING); + } + + @Override + public Leaf getQueries(LeafReaderContext ctx) { + QueriesLeaf percolatorQueries = cache.get(ctx.reader().getCoreCacheKey()); + if (percolatorQueries == null) { + throw new IllegalStateException("queries not loaded, queries should have been preloaded during index warming..."); + } + return percolatorQueries; + } + + public IndexWarmer.Listener createListener(ThreadPool threadPool) { + return new IndexWarmer.Listener() { + + final Executor executor = threadPool.executor(ThreadPool.Names.WARMER); + + @Override + public IndexWarmer.TerminationHandle warmNewReaders(IndexShard indexShard, Engine.Searcher searcher) { + final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size()); + for (final LeafReaderContext ctx : searcher.reader().leaves()) { + executor.execute(() -> { + try { + final long start = System.nanoTime(); + QueriesLeaf queries = loadQueries(ctx, indexShard.indexSettings().getIndexVersionCreated()); + cache.put(ctx.reader().getCoreCacheKey(), queries); + if (indexShard.warmerService().logger().isTraceEnabled()) { + indexShard.warmerService().logger().trace( + "loading percolator queries took [{}]", + TimeValue.timeValueNanos(System.nanoTime() - start) + ); + } + } catch (Throwable t) { + indexShard.warmerService().logger().warn("failed to load percolator queries", t); + } finally { + latch.countDown(); + } + }); + } + return () -> latch.await(); + } + + @Override + public IndexWarmer.TerminationHandle warmTopReader(IndexShard indexShard, Engine.Searcher searcher) { + return IndexWarmer.TerminationHandle.NO_WAIT; + } + }; + } + + QueriesLeaf loadQueries(LeafReaderContext context, Version indexVersionCreated) throws IOException { + LeafReader leafReader = context.reader(); + ShardId shardId = ShardUtils.extractShardId(leafReader); + if (shardId == null) { + throw new IllegalStateException("can't resolve shard id"); + } + if (indexSettings.getIndex().equals(shardId.getIndex()) == false) { + // percolator cache insanity + String message = "Trying to load queries for index " + shardId.getIndex() + " with cache of index " + indexSettings.getIndex(); + throw new IllegalStateException(message); + } + + IntObjectHashMap queries = new IntObjectHashMap<>(); + boolean legacyLoading = indexVersionCreated.before(Version.V_5_0_0); + PostingsEnum postings =

leafReader.postings(new Term(TypeFieldMapper.NAME, PercolatorFieldMapper.TYPE_NAME), PostingsEnum.NONE); + if (postings != null) { + if (legacyLoading) { + LegacyQueryFieldVisitor visitor = new LegacyQueryFieldVisitor(); + for (int docId = postings.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = postings.nextDoc()) { + leafReader.document(docId, visitor); + queries.put(docId, parseLegacyPercolatorDocument(docId, visitor.source)); + visitor.source = null; // reset + } + } else { + BinaryDocValues binaryDocValues = leafReader.getBinaryDocValues(PercolatorFieldMapper.QUERY_BUILDER_FULL_FIELD_NAME); + if (binaryDocValues != null) { + for (int docId = postings.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = postings.nextDoc()) { + BytesRef queryBuilder = binaryDocValues.get(docId); + if (queryBuilder.length > 0) { + queries.put(docId, parseQueryBuilder(docId, queryBuilder)); + } + } + } + } + } + leafReader.addCoreClosedListener(this); + return new QueriesLeaf(shardId, queries); + } + + private Query parseQueryBuilder(int docId, BytesRef queryBuilder) { + XContent xContent = QUERY_BUILDER_CONTENT_TYPE.xContent(); + try (XContentParser sourceParser = xContent.createParser(queryBuilder.bytes, queryBuilder.offset, queryBuilder.length)) { + QueryShardContext context = queryShardContextSupplier.get(); + return PercolatorFieldMapper.parseQuery(context, mapUnmappedFieldsAsString, sourceParser); + } catch (IOException e) { + throw new PercolatorException(index(), "failed to parse query builder for document [" + docId + "]", e); + } + } + + private Query parseLegacyPercolatorDocument(int docId, BytesReference source) { + try (XContentParser sourceParser = XContentHelper.createParser(source)) { + String currentFieldName = null; + XContentParser.Token token = sourceParser.nextToken(); // move the START_OBJECT + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchException("failed to parse query [" + docId + "], not starting with OBJECT"); + } + while ((token = sourceParser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = sourceParser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if ("query".equals(currentFieldName)) { + QueryShardContext context = queryShardContextSupplier.get(); + return PercolatorFieldMapper.parseQuery(context, mapUnmappedFieldsAsString, sourceParser); + } else { + sourceParser.skipChildren(); + } + } else if (token == XContentParser.Token.START_ARRAY) { + sourceParser.skipChildren(); + } + } + } catch (Exception e) { + throw new PercolatorException(index(), "failed to parse query [" + docId + "]", e); + } + return null; + } + + public PercolatorQueryCacheStats getStats(ShardId shardId) { + int numberOfQueries = 0; + for (QueriesLeaf queries : cache.values()) { + if (shardId.equals(queries.shardId)) { + numberOfQueries += queries.queries.size(); + } + } + return new PercolatorQueryCacheStats(numberOfQueries); + } + + @Override + public void onClose(Object cacheKey) throws IOException { + cache.invalidate(cacheKey); + } + + @Override + public void close() throws IOException { + cache.invalidateAll(); + } + + final static class LegacyQueryFieldVisitor extends StoredFieldVisitor { + + private BytesArray source; + + @Override + public void binaryField(FieldInfo fieldInfo, byte[] bytes) throws IOException { + source = new BytesArray(bytes); + } + + @Override + public Status needsField(FieldInfo fieldInfo) throws IOException { + if (source != 
null) { + return Status.STOP; + } + if (SourceFieldMapper.NAME.equals(fieldInfo.name)) { + return Status.YES; + } else { + return Status.NO; + } + } + + } + + final static class QueriesLeaf implements Leaf { + + final ShardId shardId; + final IntObjectHashMap queries; + + QueriesLeaf(ShardId shardId, IntObjectHashMap queries) { + this.shardId = shardId; + this.queries = queries; + } + + @Override + public Query getQuery(int docId) { + return queries.get(docId); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueryCacheStats.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueryCacheStats.java new file mode 100644 index 00000000000..a8e3b7f4799 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueryCacheStats.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.percolator; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentBuilderString; + +import java.io.IOException; + +/** + * Exposes percolator query cache statistics. + */ +public class PercolatorQueryCacheStats implements Streamable, ToXContent { + + private long numQueries; + + /** + * Noop constructor for serialization purposes. + */ + public PercolatorQueryCacheStats() { + } + + PercolatorQueryCacheStats(long numQueries) { + this.numQueries = numQueries; + } + + /** + * @return The total number of loaded percolate queries. 
+ */ + public long getNumQueries() { + return numQueries; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.PERCOLATOR); + builder.field(Fields.QUERIES, getNumQueries()); + builder.endObject(); + return builder; + } + + public void add(PercolatorQueryCacheStats percolate) { + if (percolate == null) { + return; + } + + numQueries += percolate.getNumQueries(); + } + + static final class Fields { + static final XContentBuilderString PERCOLATOR = new XContentBuilderString("percolator"); + static final XContentBuilderString QUERIES = new XContentBuilderString("num_queries"); + } + + public static PercolatorQueryCacheStats readPercolateStats(StreamInput in) throws IOException { + PercolatorQueryCacheStats stats = new PercolatorQueryCacheStats(); + stats.readFrom(in); + return stats; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + numQueries = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(numQueries); + } +} diff --git a/core/src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java b/core/src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java deleted file mode 100644 index 1bea43e4ea1..00000000000 --- a/core/src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.index.percolator; - -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.SimpleCollector; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.index.fieldvisitor.FieldsVisitor; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -/** - */ -final class QueriesLoaderCollector extends SimpleCollector { - - private final Map queries = new HashMap<>(); - private final FieldsVisitor fieldsVisitor = new FieldsVisitor(true); - private final PercolatorQueriesRegistry percolator; - private final ESLogger logger; - - private LeafReader reader; - - QueriesLoaderCollector(PercolatorQueriesRegistry percolator, ESLogger logger) { - this.percolator = percolator; - this.logger = logger; - } - - public Map queries() { - return this.queries; - } - - @Override - public void collect(int doc) throws IOException { - fieldsVisitor.reset(); - reader.document(doc, fieldsVisitor); - final Uid uid = fieldsVisitor.uid(); - - try { - // id is only used for logging, if we fail we log the id in the catch statement - final Query parseQuery = percolator.parsePercolatorDocument(null, fieldsVisitor.source()); - if (parseQuery != null) { - queries.put(new BytesRef(uid.id()), parseQuery); - } else { - logger.warn("failed to add query [{}] - parser returned null", uid); - } - - } catch (Exception e) { - logger.warn("failed to add query [{}]", e, uid); - } - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - reader = context.reader(); - } - - @Override - public void setScorer(Scorer scorer) throws IOException { - } - - @Override - public boolean needsScores() { - return false; - } -} diff --git a/core/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java index 1ad64c42135..318a0b33805 100644 --- a/core/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java @@ -42,7 +42,7 @@ public class ConstantScoreQueryParser implements QueryParser query = null; boolean queryFound = false; String queryName = null; float boost = AbstractQueryBuilder.DEFAULT_BOOST; @@ -56,6 +56,10 @@ public class ConstantScoreQueryParser implements QueryParser use prefix encoded postings format diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java index 784c924efcf..b11b57df175 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java @@ -219,18 +219,18 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilderhas_child queries. 
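
The next hunk makes has_child accept "sum" as the canonical spelling of Lucene's ScoreMode.Total, keeping it consistent with other Elasticsearch APIs, and adds the reverse mapping used when queries are rendered back out. A small sketch of the intended round trip, assuming the two helpers this hunk introduces:

    // parse: the external "sum" maps onto Lucene's internal ScoreMode.Total
    ScoreMode mode = HasChildQueryParser.parseScoreMode("sum");      // ScoreMode.Total
    // render: Total deliberately comes back out as "sum", never "total"
    String rendered = HasChildQueryParser.scoreModeAsString(mode);   // "sum"
    // all other modes fall through to the lowercased enum name: Avg -> "avg", Max -> "max"
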
@@ -104,12 +105,21 @@ public class HasChildQueryParser implements QueryParser { return ScoreMode.Max; } else if ("avg".equals(scoreModeString)) { return ScoreMode.Avg; - } else if ("total".equals(scoreModeString)) { + } else if ("sum".equals(scoreModeString)) { return ScoreMode.Total; } throw new IllegalArgumentException("No score mode for child query [" + scoreModeString + "] found"); } + public static String scoreModeAsString(ScoreMode scoreMode) { + if (scoreMode == ScoreMode.Total) { + // Lucene uses 'total' but 'sum' is more consistent with other elasticsearch APIs + return "sum"; + } else { + return scoreMode.name().toLowerCase(Locale.ROOT); + } + } + @Override public HasChildQueryBuilder getBuilderPrototype() { return HasChildQueryBuilder.PROTOTYPE; diff --git a/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 596c2499211..bd5f348db33 100644 --- a/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -121,7 +121,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder query.toXContent(builder, params); builder.field(NestedQueryParser.PATH_FIELD.getPreferredName(), path); if (scoreMode != null) { - builder.field(NestedQueryParser.SCORE_MODE_FIELD.getPreferredName(), scoreMode.name().toLowerCase(Locale.ROOT)); + builder.field(NestedQueryParser.SCORE_MODE_FIELD.getPreferredName(), HasChildQueryParser.scoreModeAsString(scoreMode)); } printBoostAndQueryName(builder); if (queryInnerHits != null) { diff --git a/core/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java index ba5d7c2447e..218919f7ed2 100644 --- a/core/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java @@ -68,20 +68,7 @@ public class NestedQueryParser implements QueryParser { } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { boost = parser.floatValue(); } else if (parseContext.parseFieldMatcher().match(currentFieldName, SCORE_MODE_FIELD)) { - String sScoreMode = parser.text(); - if ("avg".equals(sScoreMode)) { - scoreMode = ScoreMode.Avg; - } else if ("min".equals(sScoreMode)) { - scoreMode = ScoreMode.Min; - } else if ("max".equals(sScoreMode)) { - scoreMode = ScoreMode.Max; - } else if ("total".equals(sScoreMode) || "sum".equals(sScoreMode)) { - scoreMode = ScoreMode.Total; - } else if ("none".equals(sScoreMode)) { - scoreMode = ScoreMode.None; - } else { - throw new ParsingException(parser.getTokenLocation(), "illegal score_mode for nested query [" + sScoreMode + "]"); - } + scoreMode = HasChildQueryParser.parseScoreMode(parser.text()); } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { queryName = parser.text(); } else { diff --git a/core/src/main/java/org/elasticsearch/index/query/ParentIdQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/ParentIdQueryBuilder.java index f9bd7623f35..4a2efa95c9a 100644 --- a/core/src/main/java/org/elasticsearch/index/query/ParentIdQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/ParentIdQueryBuilder.java @@ -19,13 +19,18 @@ package org.elasticsearch.index.query; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; 
+import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocValuesTermsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import java.io.IOException; import java.util.Objects; @@ -71,7 +76,12 @@ public final class ParentIdQueryBuilder extends AbstractQueryBuilder percolatorQueries; - private Query percolateQuery; private Query queriesMetaDataQuery; private final Query percolateTypeQuery; /** - * @param percolatorIndexSearcher The index searcher on top of the in-memory index that holds the document being percolated - * @param percolatorQueries All the registered percolator queries - * @param percolateTypeQuery A query that identifies all document containing percolator queries + * @param docType The type of the document being percolated + * @param queryRegistry The registry holding all the percolator queries as Lucene queries. + * @param documentSource The source of the document being percolated + * @param percolatorIndexSearcher The index searcher on top of the in-memory index that holds the document being percolated + * @param percolateTypeQuery A query that identifies all document containing percolator queries */ - Builder(IndexSearcher percolatorIndexSearcher, Map percolatorQueries, Query percolateTypeQuery) { - this.percolatorIndexSearcher = percolatorIndexSearcher; - this.percolatorQueries = percolatorQueries; - this.percolateTypeQuery = percolateTypeQuery; - } - - /** - * Optionally sets a query that reduces the number of queries to percolate based on custom metadata attached - * on the percolator documents. 
- */ - void setPercolateQuery(Query percolateQuery) { - this.percolateQuery = percolateQuery; + public Builder(String docType, QueryRegistry queryRegistry, BytesReference documentSource, IndexSearcher percolatorIndexSearcher, + Query percolateTypeQuery) { + this.docType = Objects.requireNonNull(docType); + this.documentSource = Objects.requireNonNull(documentSource); + this.percolatorIndexSearcher = Objects.requireNonNull(percolatorIndexSearcher); + this.queryRegistry = Objects.requireNonNull(queryRegistry); + this.percolateTypeQuery = Objects.requireNonNull(percolateTypeQuery); } /** @@ -85,43 +81,43 @@ final class PercolatorQuery extends Query { * @param extractedTermsFieldName The name of the field to get the extracted terms from * @param unknownQueryFieldname The field used to mark documents whose queries couldn't all get extracted */ - void extractQueryTermsQuery(String extractedTermsFieldName, String unknownQueryFieldname) throws IOException { - this.queriesMetaDataQuery = ExtractQueryTermsService.createQueryTermsQuery(percolatorIndexSearcher.getIndexReader(), extractedTermsFieldName, unknownQueryFieldname); + public void extractQueryTermsQuery(String extractedTermsFieldName, String unknownQueryFieldname) throws IOException { + this.queriesMetaDataQuery = ExtractQueryTermsService.createQueryTermsQuery( + percolatorIndexSearcher.getIndexReader(), extractedTermsFieldName, unknownQueryFieldname + ); } - PercolatorQuery build() { + public PercolatorQuery build() { BooleanQuery.Builder builder = new BooleanQuery.Builder(); builder.add(percolateTypeQuery, FILTER); if (queriesMetaDataQuery != null) { builder.add(queriesMetaDataQuery, FILTER); } - if (percolateQuery != null){ - builder.add(percolateQuery, MUST); - } - return new PercolatorQuery(builder.build(), percolatorIndexSearcher, percolatorQueries); + return new PercolatorQuery(docType, queryRegistry, documentSource, builder.build(), percolatorIndexSearcher); } } + private final String documentType; + private final QueryRegistry queryRegistry; + private final BytesReference documentSource; private final Query percolatorQueriesQuery; private final IndexSearcher percolatorIndexSearcher; - private final Map percolatorQueries; - private PercolatorQuery(Query percolatorQueriesQuery, IndexSearcher percolatorIndexSearcher, Map percolatorQueries) { + private PercolatorQuery(String documentType, QueryRegistry queryRegistry, BytesReference documentSource, + Query percolatorQueriesQuery, IndexSearcher percolatorIndexSearcher) { + this.documentType = documentType; + this.documentSource = documentSource; this.percolatorQueriesQuery = percolatorQueriesQuery; + this.queryRegistry = queryRegistry; this.percolatorIndexSearcher = percolatorIndexSearcher; - this.percolatorQueries = percolatorQueries; } @Override public Query rewrite(IndexReader reader) throws IOException { - if (getBoost() != 1f) { - return super.rewrite(reader); - } - Query rewritten = percolatorQueriesQuery.rewrite(reader); if (rewritten != percolatorQueriesQuery) { - return new PercolatorQuery(rewritten, percolatorIndexSearcher, percolatorQueries); + return new PercolatorQuery(documentType, queryRegistry, documentSource, rewritten, percolatorIndexSearcher); } else { return this; } @@ -164,7 +160,7 @@ final class PercolatorQuery extends Query { return null; } - final LeafReader leafReader = leafReaderContext.reader(); + final QueryRegistry.Leaf percolatorQueries = queryRegistry.getQueries(leafReaderContext); return new Scorer(this) { @Override @@ -177,7 +173,7 @@ final class 
PercolatorQuery extends Query { return new TwoPhaseIterator(approximation.iterator()) { @Override public boolean matches() throws IOException { - return matchDocId(approximation.docID(), leafReader); + return matchDocId(approximation.docID()); } @Override @@ -202,27 +198,30 @@ final class PercolatorQuery extends Query { return approximation.docID(); } - boolean matchDocId(int docId, LeafReader leafReader) throws IOException { - SingleFieldsVisitor singleFieldsVisitor = new SingleFieldsVisitor(UidFieldMapper.NAME); - leafReader.document(docId, singleFieldsVisitor); - BytesRef percolatorQueryId = new BytesRef(singleFieldsVisitor.uid().id()); - return matchQuery(percolatorQueryId); + boolean matchDocId(int docId) throws IOException { + Query query = percolatorQueries.getQuery(docId); + if (query != null) { + return Lucene.exists(percolatorIndexSearcher, query); + } else { + return false; + } } }; } }; } - boolean matchQuery(BytesRef percolatorQueryId) throws IOException { - Query percolatorQuery = percolatorQueries.get(percolatorQueryId); - if (percolatorQuery != null) { - return Lucene.exists(percolatorIndexSearcher, percolatorQuery); - } else { - return false; - } + public IndexSearcher getPercolatorIndexSearcher() { + return percolatorIndexSearcher; } - private final Object instance = new Object(); + public String getDocumentType() { + return documentType; + } + + public BytesReference getDocumentSource() { + return documentSource; + } @Override public boolean equals(Object o) { @@ -232,19 +231,46 @@ final class PercolatorQuery extends Query { PercolatorQuery that = (PercolatorQuery) o; - return instance.equals(that.instance); + if (!documentType.equals(that.documentType)) return false; + return documentSource.equals(that.documentSource); } @Override public int hashCode() { int result = super.hashCode(); - result = 31 * result + instance.hashCode(); + result = 31 * result + documentType.hashCode(); + result = 31 * result + documentSource.hashCode(); return result; } @Override public String toString(String s) { - return "PercolatorQuery{inner={" + percolatorQueriesQuery.toString(s) + "}}"; + return "PercolatorQuery{document_type={" + documentType + "},document_source={" + documentSource.toUtf8() + + "},inner={" + percolatorQueriesQuery.toString(s) + "}}"; } + + @Override + public long ramBytesUsed() { + long sizeInBytes = 0; + if (documentSource.hasArray()) { + sizeInBytes += documentSource.array().length; + } else { + sizeInBytes += documentSource.length(); + } + return sizeInBytes; + } + + public interface QueryRegistry { + + Leaf getQueries(LeafReaderContext ctx); + + interface Leaf { + + Query getQuery(int docId); + + } + + } + } diff --git a/core/src/main/java/org/elasticsearch/index/query/PercolatorQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/PercolatorQueryBuilder.java new file mode 100644 index 00000000000..5cb1e54d203 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/query/PercolatorQueryBuilder.java @@ -0,0 +1,375 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.MultiReader; +import org.apache.lucene.index.SlowCompositeReaderWrapper; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.Version; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.DocumentMapperForType; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; +import org.elasticsearch.index.percolator.PercolatorQueryCache; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.index.mapper.SourceToParse.source; + +public class PercolatorQueryBuilder extends AbstractQueryBuilder { + + public static final String NAME = "percolator"; + static final PercolatorQueryBuilder PROTO = new PercolatorQueryBuilder(null, null, null, null, null, null, null, null); + + private final String documentType; + private final BytesReference document; + + private final String indexedDocumentIndex; + private final String indexedDocumentType; + private final String indexedDocumentId; + private final String indexedDocumentRouting; + private final String indexedDocumentPreference; + private final Long indexedDocumentVersion; + + public PercolatorQueryBuilder(String documentType, BytesReference document) { + if (documentType == null) { + throw new IllegalArgumentException("[document_type] is a required argument"); + } + if (document == null) { + throw new 
IllegalArgumentException("[document] is a required argument"); + } + this.documentType = documentType; + this.document = document; + indexedDocumentIndex = null; + indexedDocumentType = null; + indexedDocumentId = null; + indexedDocumentRouting = null; + indexedDocumentPreference = null; + indexedDocumentVersion = null; + } + + public PercolatorQueryBuilder(String documentType, String indexedDocumentIndex, String indexedDocumentType, + String indexedDocumentId, String indexedDocumentRouting, String indexedDocumentPreference, + Long indexedDocumentVersion) { + if (documentType == null) { + throw new IllegalArgumentException("[document_type] is a required argument"); + } + if (indexedDocumentIndex == null) { + throw new IllegalArgumentException("[index] is a required argument"); + } + if (indexedDocumentType == null) { + throw new IllegalArgumentException("[type] is a required argument"); + } + if (indexedDocumentId == null) { + throw new IllegalArgumentException("[id] is a required argument"); + } + this.documentType = documentType; + this.indexedDocumentIndex = indexedDocumentIndex; + this.indexedDocumentType = indexedDocumentType; + this.indexedDocumentId = indexedDocumentId; + this.indexedDocumentRouting = indexedDocumentRouting; + this.indexedDocumentPreference = indexedDocumentPreference; + this.indexedDocumentVersion = indexedDocumentVersion; + this.document = null; + } + + private PercolatorQueryBuilder(String documentType, BytesReference document, String indexedDocumentIndex, String indexedDocumentType, + String indexedDocumentId, String indexedDocumentRouting, String indexedDocumentPreference, + Long indexedDocumentVersion) { + this.documentType = documentType; + this.document = document; + this.indexedDocumentIndex = indexedDocumentIndex; + this.indexedDocumentType = indexedDocumentType; + this.indexedDocumentId = indexedDocumentId; + this.indexedDocumentRouting = indexedDocumentRouting; + this.indexedDocumentPreference = indexedDocumentPreference; + this.indexedDocumentVersion = indexedDocumentVersion; + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.field(PercolatorQueryParser.DOCUMENT_TYPE_FIELD.getPreferredName(), documentType); + if (document != null) { + XContentType contentType = XContentFactory.xContentType(document); + if (contentType == builder.contentType()) { + builder.rawField(PercolatorQueryParser.DOCUMENT_FIELD.getPreferredName(), document); + } else { + XContentParser parser = XContentFactory.xContent(contentType).createParser(document); + parser.nextToken(); + builder.field(PercolatorQueryParser.DOCUMENT_FIELD.getPreferredName()); + builder.copyCurrentStructure(parser); + } + } + if (indexedDocumentIndex != null || indexedDocumentType != null || indexedDocumentId != null) { + if (indexedDocumentIndex != null) { + builder.field(PercolatorQueryParser.INDEXED_DOCUMENT_FIELD_INDEX.getPreferredName(), indexedDocumentIndex); + } + if (indexedDocumentType != null) { + builder.field(PercolatorQueryParser.INDEXED_DOCUMENT_FIELD_TYPE.getPreferredName(), indexedDocumentType); + } + if (indexedDocumentId != null) { + builder.field(PercolatorQueryParser.INDEXED_DOCUMENT_FIELD_ID.getPreferredName(), indexedDocumentId); + } + if (indexedDocumentRouting != null) { + builder.field(PercolatorQueryParser.INDEXED_DOCUMENT_FIELD_ROUTING.getPreferredName(), indexedDocumentRouting); + } + if (indexedDocumentPreference != null) { + 
builder.field(PercolatorQueryParser.INDEXED_DOCUMENT_FIELD_PREFERENCE.getPreferredName(), indexedDocumentPreference); + } + if (indexedDocumentVersion != null) { + builder.field(PercolatorQueryParser.INDEXED_DOCUMENT_FIELD_VERSION.getPreferredName(), indexedDocumentVersion); + } + } + printBoostAndQueryName(builder); + builder.endObject(); + } + + @Override + protected PercolatorQueryBuilder doReadFrom(StreamInput in) throws IOException { + String docType = in.readString(); + String documentIndex = in.readOptionalString(); + String documentType = in.readOptionalString(); + String documentId = in.readOptionalString(); + String documentRouting = in.readOptionalString(); + String documentPreference = in.readOptionalString(); + Long documentVersion = null; + if (in.readBoolean()) { + documentVersion = in.readVLong(); + } + BytesReference documentSource = null; + if (in.readBoolean()) { + documentSource = in.readBytesReference(); + } + return new PercolatorQueryBuilder(docType, documentSource, documentIndex, documentType, documentId, + documentRouting, documentPreference, documentVersion); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeString(documentType); + out.writeOptionalString(indexedDocumentIndex); + out.writeOptionalString(indexedDocumentType); + out.writeOptionalString(indexedDocumentId); + out.writeOptionalString(indexedDocumentRouting); + out.writeOptionalString(indexedDocumentPreference); + if (indexedDocumentVersion != null) { + out.writeBoolean(true); + out.writeVLong(indexedDocumentVersion); + } else { + out.writeBoolean(false); + } + if (document != null) { + out.writeBoolean(true); + out.writeBytesReference(document); + } else { + out.writeBoolean(false); + } + } + + @Override + protected boolean doEquals(PercolatorQueryBuilder other) { + return Objects.equals(documentType, other.documentType) + && Objects.equals(document, other.document) + && Objects.equals(indexedDocumentIndex, other.indexedDocumentIndex) + && Objects.equals(indexedDocumentType, other.indexedDocumentType) + && Objects.equals(indexedDocumentId, other.indexedDocumentId); + } + + @Override + protected int doHashCode() { + return Objects.hash(documentType, document, indexedDocumentIndex, indexedDocumentType, indexedDocumentId); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { + if (document != null) { + return this; + } + + GetRequest getRequest = new GetRequest(indexedDocumentIndex, indexedDocumentType, indexedDocumentId); + getRequest.preference("_local"); + getRequest.routing(indexedDocumentRouting); + getRequest.preference(indexedDocumentPreference); + if (indexedDocumentVersion != null) { + getRequest.version(indexedDocumentVersion); + } + GetResponse getResponse = queryShardContext.getClient().get(getRequest).actionGet(); + if (getResponse.isExists() == false) { + throw new ResourceNotFoundException( + "indexed document [{}/{}/{}] couldn't be found", indexedDocumentIndex, indexedDocumentType, indexedDocumentId + ); + } + return new PercolatorQueryBuilder(documentType, getResponse.getSourceAsBytesRef()); + } + + @Override + protected Query doToQuery(QueryShardContext context) throws IOException { + if (indexedDocumentIndex != null || indexedDocumentType != null || indexedDocumentId != null) { + throw new IllegalStateException("query builder must be rewritten first"); + } + + if (document == null) { + throw new 
IllegalStateException("nothing to percolator"); + } + + MapperService mapperService = context.getMapperService(); + DocumentMapperForType docMapperForType = mapperService.documentMapperWithAutoCreate(documentType); + DocumentMapper docMapper = docMapperForType.getDocumentMapper(); + + ParsedDocument doc = docMapper.parse(source(document) + .index(context.index().getName()) + .id("_temp_id") + .type(documentType)); + + Analyzer defaultAnalyzer = context.getAnalysisService().defaultIndexAnalyzer(); + final IndexSearcher docSearcher; + if (doc.docs().size() > 1) { + assert docMapper.hasNestedObjects(); + docSearcher = createMultiDocumentSearcher(docMapper, defaultAnalyzer, doc); + } else { + // TODO: we may want to bring to MemoryIndex thread local cache back... + // but I'm unsure about the real benefits. + MemoryIndex memoryIndex = new MemoryIndex(true); + indexDoc(docMapper, defaultAnalyzer, doc.rootDoc(), memoryIndex); + docSearcher = memoryIndex.createSearcher(); + docSearcher.setQueryCache(null); + } + + PercolatorQueryCache registry = context.getPercolatorQueryCache(); + if (registry == null) { + throw new QueryShardException(context, "no percolator query registry"); + } + + Query percolateTypeQuery = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorFieldMapper.TYPE_NAME)); + PercolatorQuery.Builder builder = new PercolatorQuery.Builder( + documentType, registry, document, docSearcher, percolateTypeQuery + ); + Settings indexSettings = registry.getIndexSettings().getSettings(); + if (indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null).onOrAfter(Version.V_5_0_0)) { + builder.extractQueryTermsQuery( + PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME, PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME + ); + } + return builder.build(); + } + + public String getDocumentType() { + return documentType; + } + + public BytesReference getDocument() { + return document; + } + + private IndexSearcher createMultiDocumentSearcher(DocumentMapper docMapper, Analyzer defaultAnalyzer, ParsedDocument doc) { + IndexReader[] memoryIndices = new IndexReader[doc.docs().size()]; + List docs = doc.docs(); + int rootDocIndex = docs.size() - 1; + assert rootDocIndex > 0; + for (int i = 0; i < docs.size(); i++) { + ParseContext.Document d = docs.get(i); + MemoryIndex memoryIndex = new MemoryIndex(true); + indexDoc(docMapper, defaultAnalyzer, d, memoryIndex); + memoryIndices[i] = memoryIndex.createSearcher().getIndexReader(); + } + try { + MultiReader mReader = new MultiReader(memoryIndices, true); + LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader); + final IndexSearcher slowSearcher = new IndexSearcher(slowReader) { + + @Override + public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException { + BooleanQuery.Builder bq = new BooleanQuery.Builder(); + bq.add(query, BooleanClause.Occur.MUST); + bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT); + return super.createNormalizedWeight(bq.build(), needsScores); + } + + }; + slowSearcher.setQueryCache(null); + return slowSearcher; + } catch (IOException e) { + throw new ElasticsearchException("Failed to create index for percolator with nested document ", e); + } + } + + private void indexDoc(DocumentMapper documentMapper, Analyzer defaultAnalyzer, ParseContext.Document document, + MemoryIndex memoryIndex) { + for (IndexableField field : document.getFields()) { + if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) { + continue; + 
diff --git a/core/src/main/java/org/elasticsearch/index/query/PercolatorQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/PercolatorQueryParser.java new file mode 100644 index 00000000000..a559db59927 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/query/PercolatorQueryParser.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class PercolatorQueryParser implements QueryParser<PercolatorQueryBuilder> { + + public static final ParseField DOCUMENT_FIELD = new ParseField("document"); + public static final ParseField DOCUMENT_TYPE_FIELD = new ParseField("document_type"); + public static final ParseField INDEXED_DOCUMENT_FIELD_INDEX = new ParseField("index"); + public static final ParseField INDEXED_DOCUMENT_FIELD_TYPE = new ParseField("type"); + public static final ParseField INDEXED_DOCUMENT_FIELD_ID = new ParseField("id"); + public static final ParseField INDEXED_DOCUMENT_FIELD_ROUTING = new ParseField("routing"); + public static final ParseField INDEXED_DOCUMENT_FIELD_PREFERENCE = new ParseField("preference"); + public static final ParseField INDEXED_DOCUMENT_FIELD_VERSION = new ParseField("version"); + + @Override + public String[] names() { + return new String[]{PercolatorQueryBuilder.NAME}; + } + + @Override + public PercolatorQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException { + XContentParser parser = parseContext.parser(); + float boost = AbstractQueryBuilder.DEFAULT_BOOST; + + String documentType = null; + + String indexedDocumentIndex = null; + String indexedDocumentType = null; + String indexedDocumentId = null; + String indexedDocumentRouting = null; + String indexedDocumentPreference = null; + Long indexedDocumentVersion = null; + + BytesReference source = null; + + String queryName = null; + String currentFieldName = null; + + XContentParser.Token token; + while ((token = parser.nextToken()) !=
XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if (parseContext.parseFieldMatcher().match(currentFieldName, DOCUMENT_FIELD)) { + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + builder.copyCurrentStructure(parser); + builder.flush(); + source = builder.bytes(); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + PercolatorQueryBuilder.NAME + + "] query does not support [" + token + "]"); + } + } else if (token.isValue()) { + if (parseContext.parseFieldMatcher().match(currentFieldName, DOCUMENT_TYPE_FIELD)) { + documentType = parser.text(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, INDEXED_DOCUMENT_FIELD_INDEX)) { + indexedDocumentIndex = parser.text(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, INDEXED_DOCUMENT_FIELD_TYPE)) { + indexedDocumentType = parser.text(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, INDEXED_DOCUMENT_FIELD_ID)) { + indexedDocumentId = parser.text(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, INDEXED_DOCUMENT_FIELD_ROUTING)) { + indexedDocumentRouting = parser.text(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, INDEXED_DOCUMENT_FIELD_PREFERENCE)) { + indexedDocumentPreference = parser.text(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, INDEXED_DOCUMENT_FIELD_VERSION)) { + indexedDocumentVersion = parser.longValue(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + boost = parser.floatValue(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + queryName = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + PercolatorQueryBuilder.NAME + + "] query does not support [" + currentFieldName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + PercolatorQueryBuilder.NAME + + "] query does not support [" + token + "]"); + } + } + + if (documentType == null) { + throw new IllegalArgumentException("[" + PercolatorQueryBuilder.NAME + "] query is missing required [" + + DOCUMENT_TYPE_FIELD.getPreferredName() + "] parameter"); + } + + PercolatorQueryBuilder queryBuilder; + if (source != null) { + queryBuilder = new PercolatorQueryBuilder(documentType, source); + } else if (indexedDocumentId != null) { + queryBuilder = new PercolatorQueryBuilder(documentType, indexedDocumentIndex, indexedDocumentType, + indexedDocumentId, indexedDocumentRouting, indexedDocumentPreference, indexedDocumentVersion); + } else { + throw new IllegalArgumentException("[" + PercolatorQueryBuilder.NAME + "] query, nothing to percolate"); + } + queryBuilder.queryName(queryName); + queryBuilder.boost(boost); + return queryBuilder; + } + + @Override + public PercolatorQueryBuilder getBuilderPrototype() { + return PercolatorQueryBuilder.PROTO; + } + +}
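The parser above accepts exactly two shapes: a nested `document` object, which is the only START_OBJECT it tolerates and which it copies verbatim into `source`, or flat fields naming an indexed document; supplying neither is rejected at the end with "nothing to percolate". A hedged sketch of building the inline form by hand (field names and values are illustrative):

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

import java.io.IOException;

final class PercolatorQueryBody {
    static XContentBuilder inlineForm() throws IOException {
        // A request body the fromXContent method above would accept:
        // "document_type" is the one required flat field, the "document"
        // sub-object is copied into `source`, and any unknown key would
        // hit one of the ParsingException branches.
        return XContentFactory.jsonBuilder()
                .startObject()
                    .startObject("percolator")
                        .field("document_type", "doctype")
                        .startObject("document")
                            .field("message", "A new bonsai tree in the office")
                        .endObject()
                    .endObject()
                .endObject();
    }
}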
diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index 21c1f3ff695..f04f03fcbcd 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -832,6 +832,18 @@ public abstract class QueryBuilders { return new ExistsQueryBuilder(name); } + public static PercolatorQueryBuilder percolatorQuery(String documentType, BytesReference document) { + return new PercolatorQueryBuilder(documentType, document); + } + + public static PercolatorQueryBuilder percolatorQuery(String documentType, String indexedDocumentIndex, + String indexedDocumentType, String indexedDocumentId, + String indexedDocumentRouting, String indexedDocumentPreference, + Long indexedDocumentVersion) { + return new PercolatorQueryBuilder(documentType, indexedDocumentIndex, indexedDocumentType, indexedDocumentId, + indexedDocumentRouting, indexedDocumentPreference, indexedDocumentVersion); + } + private QueryBuilders() { } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index e057aff06b1..11164659b3f 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query; import org.elasticsearch.client.Client; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.fieldstats.FieldStatsProvider; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.script.ScriptService; @@ -31,6 +32,7 @@ public class QueryRewriteContext { protected final IndexSettings indexSettings; protected final IndicesQueriesRegistry indicesQueriesRegistry; protected final QueryParseContext parseContext; + protected FieldStatsProvider fieldStatsProvider; public QueryRewriteContext(IndexSettings indexSettings, ScriptService scriptService, IndicesQueriesRegistry indicesQueriesRegistry) { this.scriptService = scriptService; @@ -39,6 +41,14 @@ public class QueryRewriteContext { this.parseContext = new QueryParseContext(indicesQueriesRegistry); } + public void setFieldStatsProvider(FieldStatsProvider fieldStatsProvider) { + this.fieldStatsProvider = fieldStatsProvider; + } + + public FieldStatsProvider getFieldStatsProvider() { + return fieldStatsProvider; + } + /** * Returns a client to fetch resources from local or remote nodes. */
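The FieldStatsProvider hook added above feeds the RangeQueryBuilder.doRewrite change further below. Its essential decision logic, reduced to a hedged sketch (the Relation enum here only mirrors FieldStatsProvider.Relation for illustration, and the bound-tightening detail is elided):

import org.elasticsearch.index.query.MatchNoneQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.RangeQueryBuilder;

final class RangeRewriteSketch {
    // Stand-in for FieldStatsProvider.Relation as consumed by doRewrite.
    enum Relation { DISJOINT, WITHIN, INTERSECTS }

    static QueryBuilder rewrite(RangeQueryBuilder range, Relation relation) {
        switch (relation) {
            case DISJOINT:
                // No value on this shard can match: emit match_none so the
                // shard does no work at all.
                return new MatchNoneQueryBuilder();
            case WITHIN:
                // Every value falls inside the range: the real doRewrite
                // replaces the bounds with the shard's min/max so that
                // equivalent queries rewrite identically and cache better.
                return range;
            default:
                // INTERSECTS: nothing can be simplified.
                return range;
        }
    }
}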
diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index 6acd5272f89..a21b53cdf51 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -44,9 +44,9 @@ import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.core.TextFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.index.similarity.SimilarityService; @@ -87,13 +87,15 @@ public class QueryShardContext extends QueryRewriteContext { private final Map<String, Query> namedQueries = new HashMap<>(); private final MapperQueryParser queryParser = new MapperQueryParser(this); + private final IndicesQueriesRegistry indicesQueriesRegistry; + private final PercolatorQueryCache percolatorQueryCache; private boolean allowUnmappedFields; private boolean mapUnmappedFieldAsString; private NestedScope nestedScope; boolean isFilter; // pkg private for testing public QueryShardContext(IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache, IndexFieldDataService indexFieldDataService, MapperService mapperService, SimilarityService similarityService, ScriptService scriptService, - final IndicesQueriesRegistry indicesQueriesRegistry) { + final IndicesQueriesRegistry indicesQueriesRegistry, PercolatorQueryCache percolatorQueryCache) { super(indexSettings, scriptService, indicesQueriesRegistry); this.indexSettings = indexSettings; this.similarityService = similarityService; @@ -101,17 +103,18 @@ public class QueryShardContext extends QueryRewriteContext { this.bitsetFilterCache = bitsetFilterCache; this.indexFieldDataService = indexFieldDataService; this.allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields(); - + this.indicesQueriesRegistry = indicesQueriesRegistry; + this.percolatorQueryCache = percolatorQueryCache; } public QueryShardContext(QueryShardContext source) { - this(source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService, source.similarityService, source.scriptService, source.indicesQueriesRegistry); + this(source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService, source.similarityService, source.scriptService, source.indicesQueriesRegistry, source.percolatorQueryCache); this.types = source.getTypes(); } public QueryShardContext clone() { - return new QueryShardContext(indexSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService, scriptService, indicesQueriesRegistry); + return new QueryShardContext(indexSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService, scriptService, indicesQueriesRegistry, percolatorQueryCache); } public void parseFieldMatcher(ParseFieldMatcher parseFieldMatcher) { @@ -148,6 +151,10 @@ public class QueryShardContext extends QueryRewriteContext { return mapperService; } + public PercolatorQueryCache getPercolatorQueryCache() { + return percolatorQueryCache; + } + + public
Similarity getSearchSimilarity() { return similarityService != null ? similarityService.similarity(mapperService) : null; } diff --git a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index cd99bec0f74..c3953a51170 100644 --- a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -22,6 +22,10 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.action.fieldstats.FieldStats; +import org.elasticsearch.action.fieldstats.IndexConstraint; +import org.elasticsearch.action.fieldstats.IndexConstraint.Comparison; +import org.elasticsearch.action.fieldstats.IndexConstraint.Property; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -30,6 +34,7 @@ import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.fieldstats.FieldStatsProvider; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.core.DateFieldMapper; import org.joda.time.DateTimeZone; @@ -253,6 +258,43 @@ public class RangeQueryBuilder extends AbstractQueryBuilder i return NAME; } + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { + FieldStatsProvider fieldStatsProvider = queryShardContext.getFieldStatsProvider(); + // If the fieldStatsProvider is null we are not on the shard and cannot + // rewrite so just return without rewriting + if (fieldStatsProvider != null) { + DateMathParser dateMathParser = format == null ? null : new DateMathParser(format); + FieldStatsProvider.Relation relation = fieldStatsProvider.isFieldWithinQuery(fieldName, from, to, includeLower, includeUpper, + timeZone, dateMathParser); + switch (relation) { + case DISJOINT: + return new MatchNoneQueryBuilder(); + case WITHIN: + FieldStats fieldStats = fieldStatsProvider.get(fieldName); + if (!(fieldStats.getMinValue().equals(from) && fieldStats.getMaxValue().equals(to) && includeUpper && includeLower)) { + // Rebuild the range query with the bounds for this shard. + // The includeLower/Upper values are preserved only if the + // bound has not been changed by the rewrite + RangeQueryBuilder newRangeQuery = new RangeQueryBuilder(fieldName); + String dateFormatString = format == null ? 
null : format.format(); + newRangeQuery.from(fieldStats.getMinValue(), includeLower || fieldStats.match( + new IndexConstraint(fieldName, Property.MIN, Comparison.GT, fieldStats.stringValueOf(from, dateFormatString)))); + newRangeQuery.to(fieldStats.getMaxValue(), includeUpper || fieldStats.match( + new IndexConstraint(fieldName, Property.MAX, Comparison.LT, fieldStats.stringValueOf(to, dateFormatString)))); + newRangeQuery.format = format; + newRangeQuery.timeZone = timeZone; + return newRangeQuery; + } else { + return this; + } + case INTERSECTS: + break; + } + } + return this; + } + @Override protected Query doToQuery(QueryShardContext context) throws IOException { Query query = null; diff --git a/core/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java b/core/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java index 9923728e3bd..86983026b19 100644 --- a/core/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java +++ b/core/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -61,8 +60,8 @@ public class NestedInnerQueryParseSupport { protected ObjectMapper nestedObjectMapper; private ObjectMapper parentObjectMapper; - public NestedInnerQueryParseSupport(XContentParser parser, SearchContext searchContext) { - shardContext = searchContext.getQueryShardContext(); + public NestedInnerQueryParseSupport(XContentParser parser, QueryShardContext context) { + shardContext = context; parseContext = shardContext.parseContext(); shardContext.reset(parser); diff --git a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java index 979bfba605f..9cd587704cb 100644 --- a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.QueryParsers; import java.io.IOException; -import java.util.List; public class MatchQuery { @@ -336,10 +335,10 @@ public class MatchQuery { return prefixQuery; } else if (query instanceof MultiPhraseQuery) { MultiPhraseQuery pq = (MultiPhraseQuery)query; - List<Term[]> terms = pq.getTermArrays(); + Term[][] terms = pq.getTermArrays(); int[] positions = pq.getPositions(); - for (int i = 0; i < terms.size(); i++) { - prefixQuery.add(terms.get(i), positions[i]); + for (int i = 0; i < terms.length; i++) { + prefixQuery.add(terms[i], positions[i]); } return prefixQuery; } else if (query instanceof TermQuery) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java b/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java index c8d0379d701..adae6caf452 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java @@ -49,7 +49,7 @@ import java.util.Map; * be stored as payloads to numeric doc values.
*/ public final class ElasticsearchMergePolicy extends MergePolicy { - + private static ESLogger logger = Loggers.getLogger(ElasticsearchMergePolicy.class); private final MergePolicy delegate; @@ -69,9 +69,7 @@ public final class ElasticsearchMergePolicy extends MergePolicy { /** Return an "upgraded" view of the reader. */ static CodecReader filter(CodecReader reader) throws IOException { - // convert 0.90.x _uid payloads to _version docvalues if needed - reader = VersionFieldUpgrader.wrap(reader); - // TODO: remove 0.90.x/1.x freqs/prox/payloads from _uid? + // TODO: remove 0.90.x/1.x freqs/prox/payloads from _uid? // the previous code never did this, so some indexes carry around trash. return reader; } @@ -157,7 +155,7 @@ public final class ElasticsearchMergePolicy extends MergePolicy { // TODO: Use IndexUpgradeMergePolicy instead. We should be comparing codecs, // for now we just assume every minor upgrade has a new format. - logger.debug("Adding segment " + info.info.name + " to be upgraded"); + logger.debug("Adding segment {} to be upgraded", info.info.name); spec.add(new OneMerge(Collections.singletonList(info))); } @@ -165,14 +163,14 @@ public final class ElasticsearchMergePolicy extends MergePolicy { if (spec.merges.size() == MAX_CONCURRENT_UPGRADE_MERGES) { // hit our max upgrades, so return the spec. we will get a cascaded call to continue. - logger.debug("Returning " + spec.merges.size() + " merges for upgrade"); + logger.debug("Returning {} merges for upgrade", spec.merges.size()); return spec; } } // We must have less than our max upgrade merges, so the next return will be our last in upgrading mode. if (spec.merges.isEmpty() == false) { - logger.debug("Returning " + spec.merges.size() + " merges for end of upgrade"); + logger.debug("Returning {} merges for end of upgrade", spec.merges.size()); return spec; } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 4d35755e159..5d54a8c22c3 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -48,9 +48,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.SuspendableRefContainer; -import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.NodeServicesProvider; @@ -83,9 +81,7 @@ import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.percolator.PercolateStats; -import org.elasticsearch.index.percolator.PercolatorQueriesRegistry; -import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; @@ -106,7 +102,6 @@ import org.elasticsearch.index.warmer.WarmerStats; import org.elasticsearch.indices.IndexingMemoryController; import 
org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.search.suggest.completion.CompletionFieldStats; import org.elasticsearch.search.suggest.completion.CompletionStats; import org.elasticsearch.threadpool.ThreadPool; @@ -122,7 +117,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -140,7 +134,6 @@ public class IndexShard extends AbstractIndexShardComponent { private final ShardIndexWarmerService shardWarmerService; private final ShardRequestCache shardQueryCache; private final ShardFieldData shardFieldData; - private final PercolatorQueriesRegistry percolatorQueriesRegistry; private final IndexFieldDataService indexFieldDataService; private final ShardSuggestMetric shardSuggestMetric = new ShardSuggestMetric(); private final ShardBitsetFilterCache shardBitsetFilterCache; @@ -159,7 +152,6 @@ public class IndexShard extends AbstractIndexShardComponent { * being indexed/deleted. */ private final AtomicLong writingBytes = new AtomicLong(); - private volatile ScheduledFuture refreshScheduledFuture; protected volatile ShardRouting shardRouting; protected volatile IndexShardState state; protected final AtomicReference currentEngineReference = new AtomicReference<>(); @@ -201,7 +193,8 @@ public class IndexShard extends AbstractIndexShardComponent { public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, @Nullable EngineFactory engineFactory, - IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, NodeServicesProvider provider, SearchSlowLog slowLog, Engine.Warmer warmer, IndexingOperationListener... listeners) { + IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, NodeServicesProvider provider, + SearchSlowLog slowLog, Engine.Warmer warmer, IndexingOperationListener... 
listeners) { super(shardId, indexSettings); final Settings settings = indexSettings.getSettings(); this.codecService = new CodecService(mapperService, logger); @@ -246,8 +239,6 @@ public class IndexShard extends AbstractIndexShardComponent { this.engineConfig = newEngineConfig(translogConfig, cachingPolicy); this.suspendableRefContainer = new SuspendableRefContainer(); this.searcherWrapper = indexSearcherWrapper; - QueryShardContext queryShardContext = new QueryShardContext(indexSettings, indexCache.bitsetFilterCache(), indexFieldDataService, mapperService, similarityService, provider.getScriptService(), provider.getIndicesQueriesRegistry()); - this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, queryShardContext); } public Store store() { @@ -476,12 +467,8 @@ public class IndexShard extends AbstractIndexShardComponent { if (logger.isTraceEnabled()) { logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs()); } - final boolean isPercolatorQuery = percolatorQueriesRegistry.isPercolatorQuery(index); Engine engine = getEngine(); created = engine.index(index); - if (isPercolatorQuery) { - percolatorQueriesRegistry.updatePercolateQuery(engine, index.id()); - } index.endTime(System.nanoTime()); } catch (Throwable ex) { indexingOperationListeners.postIndex(index, ex); @@ -519,12 +506,8 @@ public class IndexShard extends AbstractIndexShardComponent { if (logger.isTraceEnabled()) { logger.trace("delete [{}]", delete.uid().text()); } - final boolean isPercolatorQuery = percolatorQueriesRegistry.isPercolatorQuery(delete); Engine engine = getEngine(); engine.delete(delete); - if (isPercolatorQuery) { - percolatorQueriesRegistry.updatePercolateQuery(engine, delete.id()); - } delete.endTime(System.nanoTime()); } catch (Throwable ex) { indexingOperationListeners.postDelete(delete, ex); @@ -644,10 +627,6 @@ public class IndexShard extends AbstractIndexShardComponent { return shardFieldData.stats(fields); } - public PercolatorQueriesRegistry percolateRegistry() { - return percolatorQueriesRegistry; - } - public TranslogStats translogStats() { return getEngine().getTranslog().stats(); } @@ -712,7 +691,7 @@ public class IndexShard extends AbstractIndexShardComponent { false, true, upgrade.upgradeOnlyAncientSegments()); org.apache.lucene.util.Version version = minimumCompatibleVersion(); if (logger.isTraceEnabled()) { - logger.trace("upgraded segment {} from version {} to version {}", previousVersion, version); + logger.trace("upgraded segments for {} from version {} to version {}", shardId, previousVersion, version); } return version; @@ -784,10 +763,6 @@ public class IndexShard extends AbstractIndexShardComponent { public void close(String reason, boolean flushEngine) throws IOException { synchronized (mutex) { try { - if (state != IndexShardState.CLOSED) { - FutureUtils.cancel(refreshScheduledFuture); - refreshScheduledFuture = null; - } changeState(IndexShardState.CLOSED, reason); } finally { final Engine engine = this.currentEngineReference.getAndSet(null); @@ -796,18 +771,15 @@ public class IndexShard extends AbstractIndexShardComponent { engine.flushAndClose(); } } finally { // playing safe here and close the engine even if the above succeeds - close can be called multiple times - IOUtils.close(engine, percolatorQueriesRegistry); + IOUtils.close(engine); } } } } public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException { - if 
(mapperService.hasMapping(PercolatorService.TYPE_NAME)) { + if (mapperService.hasMapping(PercolatorFieldMapper.TYPE_NAME)) { refresh("percolator_load_queries"); - try (Engine.Searcher searcher = getEngine().acquireSearcher("percolator_load_queries")) { - this.percolatorQueriesRegistry.loadQueries(searcher.reader()); - } } synchronized (mutex) { if (state == IndexShardState.CLOSED) { @@ -1104,10 +1076,6 @@ public class IndexShard extends AbstractIndexShardComponent { return getEngine().getTranslog(); } - public PercolateStats percolateStats() { - return percolatorQueriesRegistry.stats(); - } - public IndexEventListener getIndexEventListener() { return indexEventListener; } diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardId.java b/core/src/main/java/org/elasticsearch/index/shard/ShardId.java index 3dea5501c62..a9bc63ae44f 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShardId.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardId.java @@ -98,7 +98,7 @@ public class ShardId implements Streamable, Comparable { @Override public void readFrom(StreamInput in) throws IOException { - index = Index.readIndex(in); + index = new Index(in); shardId = in.readVInt(); hashCode = computeHashCode(); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java index 3d6fbf08102..be0d51bd2b6 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java @@ -29,7 +29,6 @@ import java.io.IOException; import java.nio.file.FileStore; import java.nio.file.Files; import java.nio.file.Path; -import java.util.HashMap; import java.util.Map; public final class ShardPath { @@ -37,22 +36,20 @@ public final class ShardPath { public static final String TRANSLOG_FOLDER_NAME = "translog"; private final Path path; - private final String indexUUID; private final ShardId shardId; private final Path shardStatePath; private final boolean isCustomDataPath; - public ShardPath(boolean isCustomDataPath, Path dataPath, Path shardStatePath, String indexUUID, ShardId shardId) { + public ShardPath(boolean isCustomDataPath, Path dataPath, Path shardStatePath, ShardId shardId) { assert dataPath.getFileName().toString().equals(Integer.toString(shardId.id())) : "dataPath must end with the shard ID but didn't: " + dataPath.toString(); assert shardStatePath.getFileName().toString().equals(Integer.toString(shardId.id())) : "shardStatePath must end with the shard ID but didn't: " + dataPath.toString(); - assert dataPath.getParent().getFileName().toString().equals(shardId.getIndexName()) : "dataPath must end with index/shardID but didn't: " + dataPath.toString(); - assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndexName()) : "shardStatePath must end with index/shardID but didn't: " + dataPath.toString(); + assert dataPath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : "dataPath must end with index path id but didn't: " + dataPath.toString(); + assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : "shardStatePath must end with index path id but didn't: " + dataPath.toString(); if (isCustomDataPath && dataPath.equals(shardStatePath)) { throw new IllegalArgumentException("shard state path must be different to the data path when using custom data paths"); } this.isCustomDataPath = isCustomDataPath; this.path = dataPath; - this.indexUUID 
= indexUUID; this.shardId = shardId; this.shardStatePath = shardStatePath; } @@ -73,10 +70,6 @@ public final class ShardPath { return Files.exists(path); } - public String getIndexUUID() { - return indexUUID; - } - public ShardId getShardId() { return shardId; } @@ -144,7 +137,7 @@ public final class ShardPath { dataPath = statePath; } logger.debug("{} loaded data path [{}], state path [{}]", shardId, dataPath, statePath); - return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, indexUUID, shardId); + return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, shardId); } } @@ -168,34 +161,6 @@ public final class ShardPath { } } - /** Maps each path.data path to a "guess" of how many bytes the shards allocated to that path might additionally use over their - * lifetime; we do this so a bunch of newly allocated shards won't just all go the path with the most free space at this moment. */ - private static Map getEstimatedReservedBytes(NodeEnvironment env, long avgShardSizeInBytes, Iterable shards) throws IOException { - long totFreeSpace = 0; - for (NodeEnvironment.NodePath nodePath : env.nodePaths()) { - totFreeSpace += nodePath.fileStore.getUsableSpace(); - } - - // Very rough heuristic of how much disk space we expect the shard will use over its lifetime, the max of current average - // shard size across the cluster and 5% of the total available free space on this node: - long estShardSizeInBytes = Math.max(avgShardSizeInBytes, (long) (totFreeSpace/20.0)); - - // Collate predicted (guessed!) disk usage on each path.data: - Map reservedBytes = new HashMap<>(); - for (IndexShard shard : shards) { - Path dataPath = NodeEnvironment.shardStatePathToDataPath(shard.shardPath().getShardStatePath()); - - // Remove indices// subdirs from the statePath to get back to the path.data/: - Long curBytes = reservedBytes.get(dataPath); - if (curBytes == null) { - curBytes = 0L; - } - reservedBytes.put(dataPath, curBytes + estShardSizeInBytes); - } - - return reservedBytes; - } - public static ShardPath selectNewPathForShard(NodeEnvironment env, ShardId shardId, IndexSettings indexSettings, long avgShardSizeInBytes, Map dataPathToShardCount) throws IOException { @@ -206,7 +171,6 @@ public final class ShardPath { dataPath = env.resolveCustomLocation(indexSettings, shardId); statePath = env.nodePaths()[0].resolve(shardId); } else { - long totFreeSpace = 0; for (NodeEnvironment.NodePath nodePath : env.nodePaths()) { totFreeSpace += nodePath.fileStore.getUsableSpace(); @@ -241,9 +205,7 @@ public final class ShardPath { statePath = bestPath.resolve(shardId); dataPath = statePath; } - - final String indexUUID = indexSettings.getUUID(); - return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, indexUUID, shardId); + return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, shardId); } @Override @@ -258,9 +220,6 @@ public final class ShardPath { if (shardId != null ? !shardId.equals(shardPath.shardId) : shardPath.shardId != null) { return false; } - if (indexUUID != null ? !indexUUID.equals(shardPath.indexUUID) : shardPath.indexUUID != null) { - return false; - } if (path != null ? !path.equals(shardPath.path) : shardPath.path != null) { return false; } @@ -271,7 +230,6 @@ public final class ShardPath { @Override public int hashCode() { int result = path != null ? path.hashCode() : 0; - result = 31 * result + (indexUUID != null ? indexUUID.hashCode() : 0); result = 31 * result + (shardId != null ? 
shardId.hashCode() : 0); return result; } @@ -280,7 +238,6 @@ public final class ShardPath { public String toString() { return "ShardPath{" + "path=" + path + - ", indexUUID='" + indexUUID + '\'' + ", shard=" + shardId + '}'; } diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index e057349223d..d11e6734025 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -128,9 +128,8 @@ final class StoreRecovery { assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]"; if (logger.isTraceEnabled()) { - StringBuilder sb = new StringBuilder(); - sb.append("recovery completed from ").append("shard_store").append(", took [").append(timeValueMillis(recoveryState.getTimer().time())).append("]\n"); RecoveryState.Index index = recoveryState.getIndex(); + StringBuilder sb = new StringBuilder(); sb.append(" index : files [").append(index.totalFileCount()).append("] with total_size [") .append(new ByteSizeValue(index.totalBytes())).append("], took[") .append(TimeValue.timeValueMillis(index.time())).append("]\n"); @@ -142,7 +141,7 @@ final class StoreRecovery { .append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n"); sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations()) .append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]"); - logger.trace(sb.toString()); + logger.trace("recovery completed from [shard_store], took [{}]\n{}", timeValueMillis(recoveryState.getTimer().time()), sb); } else if (logger.isDebugEnabled()) { logger.debug("recovery completed from [shard_store], took [{}]", timeValueMillis(recoveryState.getTimer().time())); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/VersionFieldUpgrader.java b/core/src/main/java/org/elasticsearch/index/shard/VersionFieldUpgrader.java deleted file mode 100644 index 42bd5420ac3..00000000000 --- a/core/src/main/java/org/elasticsearch/index/shard/VersionFieldUpgrader.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.shard; - -import org.apache.lucene.codecs.DocValuesProducer; -import org.apache.lucene.index.CodecReader; -import org.apache.lucene.index.DocValuesType; -import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.FilterCodecReader; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.index.PostingsEnum; -import org.apache.lucene.index.Terms; -import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.util.Bits; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.packed.GrowableWriter; -import org.apache.lucene.util.packed.PackedInts; -import org.elasticsearch.common.Numbers; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.index.mapper.internal.VersionFieldMapper; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; - -/** - * Converts 0.90.x _uid payloads to _version docvalues - */ -class VersionFieldUpgrader extends FilterCodecReader { - final FieldInfos infos; - - VersionFieldUpgrader(CodecReader in) { - super(in); - - // Find a free field number - int fieldNumber = 0; - for (FieldInfo fi : in.getFieldInfos()) { - fieldNumber = Math.max(fieldNumber, fi.number + 1); - } - - // TODO: lots of things can wrong here... - FieldInfo newInfo = new FieldInfo(VersionFieldMapper.NAME, // field name - fieldNumber, // field number - false, // store term vectors - false, // omit norms - false, // store payloads - IndexOptions.NONE, // index options - DocValuesType.NUMERIC, // docvalues - -1, // docvalues generation - Collections.emptyMap() // attributes - ); - newInfo.checkConsistency(); // fail merge immediately if above code is wrong - - final ArrayList fieldInfoList = new ArrayList<>(); - for (FieldInfo info : in.getFieldInfos()) { - if (!info.name.equals(VersionFieldMapper.NAME)) { - fieldInfoList.add(info); - } - } - fieldInfoList.add(newInfo); - infos = new FieldInfos(fieldInfoList.toArray(new FieldInfo[fieldInfoList.size()])); - } - - static CodecReader wrap(CodecReader reader) throws IOException { - final FieldInfos fieldInfos = reader.getFieldInfos(); - final FieldInfo versionInfo = fieldInfos.fieldInfo(VersionFieldMapper.NAME); - if (versionInfo != null && versionInfo.getDocValuesType() != DocValuesType.NONE) { - // the reader is a recent one, it has versions and they are stored - // in a numeric doc values field - return reader; - } - // The segment is an old one, look at the _uid field - final Terms terms = reader.terms(UidFieldMapper.NAME); - if (terms == null || !terms.hasPayloads()) { - // The segment doesn't have an _uid field or doesn't have payloads - // don't try to do anything clever. 
If any other segment has versions - // all versions of this segment will be initialized to 0 - return reader; - } - // convert _uid payloads -> _version docvalues - return new VersionFieldUpgrader(reader); - } - - @Override - public FieldInfos getFieldInfos() { - return infos; - } - - @Override - public DocValuesProducer getDocValuesReader() { - DocValuesProducer producer = in.getDocValuesReader(); - // TODO: move this nullness stuff out - if (producer == null) { - producer = FilterDocValuesProducer.EMPTY; - } - return new UninvertedVersions(producer, this); - } - - static class UninvertedVersions extends FilterDocValuesProducer { - final CodecReader reader; - - UninvertedVersions(DocValuesProducer in, CodecReader reader) { - super(in); - this.reader = reader; - } - - @Override - public NumericDocValues getNumeric(FieldInfo field) throws IOException { - if (VersionFieldMapper.NAME.equals(field.name)) { - // uninvert into a packed ints and expose as docvalues - final Terms terms = reader.terms(UidFieldMapper.NAME); - final TermsEnum uids = terms.iterator(); - final GrowableWriter versions = new GrowableWriter(2, reader.maxDoc(), PackedInts.COMPACT); - PostingsEnum dpe = null; - for (BytesRef uid = uids.next(); uid != null; uid = uids.next()) { - dpe = uids.postings(dpe, PostingsEnum.PAYLOADS); - assert terms.hasPayloads() : "field has payloads"; - final Bits liveDocs = reader.getLiveDocs(); - for (int doc = dpe.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = dpe.nextDoc()) { - if (liveDocs != null && liveDocs.get(doc) == false) { - continue; - } - dpe.nextPosition(); - final BytesRef payload = dpe.getPayload(); - if (payload != null && payload.length == 8) { - final long version = Numbers.bytesToLong(payload); - versions.set(doc, version); - break; - } - } - } - return versions; - } else { - return in.getNumeric(field); - } - } - - @Override - public Bits getDocsWithField(FieldInfo field) throws IOException { - if (VersionFieldMapper.NAME.equals(field.name)) { - return new Bits.MatchAllBits(reader.maxDoc()); - } else { - return in.getDocsWithField(field); - } - } - - @Override - public DocValuesProducer getMergeInstance() throws IOException { - return new UninvertedVersions(in.getMergeInstance(), reader); - } - } -} diff --git a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index e950ebda1b3..edbebe8f033 100644 --- a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; +import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexModule; @@ -63,6 +64,10 @@ public final class SimilarityService extends AbstractIndexComponent { Map similaritySettings = this.indexSettings.getSettings().getGroups(IndexModule.SIMILARITY_SETTINGS_PREFIX); for (Map.Entry entry : similaritySettings.entrySet()) { String name = entry.getKey(); + // Starting with v5.0 indices, it should no longer be possible to redefine built-in similarities + if(BUILT_IN.containsKey(name) && indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_0_0)) { + throw new IllegalArgumentException("Cannot redefine 
built-in Similarity [" + name + "]"); + } Settings settings = entry.getValue(); String typeName = settings.get("type"); if (typeName == null) { @@ -76,9 +81,16 @@ public final class SimilarityService extends AbstractIndexComponent { } providers.put(name, factory.apply(name, settings)); } - addSimilarities(similaritySettings, providers, DEFAULTS); + for (Map.Entry<String, SimilarityProvider> entry : addSimilarities(similaritySettings, DEFAULTS).entrySet()) { + // Avoid overwriting custom providers for indices older than v5.0 + if (providers.containsKey(entry.getKey()) && indexSettings.getIndexVersionCreated().before(Version.V_5_0_0)) { + continue; + } + providers.put(entry.getKey(), entry.getValue()); + } this.similarities = providers; - defaultSimilarity = providers.get(SimilarityService.DEFAULT_SIMILARITY).get(); + defaultSimilarity = (providers.get("default") != null) ? providers.get("default").get() + : providers.get(SimilarityService.DEFAULT_SIMILARITY).get(); // Expert users can configure the base type as being different to default, but out-of-box we use default. baseSimilarity = (providers.get("base") != null) ? providers.get("base").get() : defaultSimilarity; @@ -90,7 +102,9 @@ public final class SimilarityService extends AbstractIndexComponent { defaultSimilarity; } - private void addSimilarities(Map<String, Settings> similaritySettings, Map<String, SimilarityProvider> providers, Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities) { + private Map<String, SimilarityProvider> addSimilarities(Map<String, Settings> similaritySettings, + Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities) { + Map<String, SimilarityProvider> providers = new HashMap<>(similarities.size()); for (Map.Entry<String, BiFunction<String, Settings, SimilarityProvider>> entry : similarities.entrySet()) { String name = entry.getKey(); BiFunction<String, Settings, SimilarityProvider> factory = entry.getValue(); @@ -100,12 +114,17 @@ public final class SimilarityService extends AbstractIndexComponent { } providers.put(name, factory.apply(name, settings)); } + return providers; } public SimilarityProvider getSimilarity(String name) { return similarities.get(name); } + public SimilarityProvider getDefaultSimilarity() { + return similarities.get("default"); + } + static class PerFieldSimilarity extends PerFieldSimilarityWrapper { private final Similarity defaultSimilarity;
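The SimilarityService hunk above encodes two compatibility rules: indices created on or after 5.0 may no longer redefine a built-in similarity, while indices created before 5.0 keep any custom provider that shadows a default one. A rough standalone sketch of that version gate, with illustrative names and a plain int standing in for the Version class:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    final class SimilarityNameGuard {
        // illustrative subset of built-in similarity names
        private static final Set<String> BUILT_IN =
                new HashSet<>(Arrays.asList("default", "classic", "BM25"));

        /** Rejects a configured name only when the index is new enough to enforce the rule. */
        static void checkRedefinition(String name, int indexCreatedVersion, int firstEnforcingVersion) {
            if (BUILT_IN.contains(name) && indexCreatedVersion >= firstEnforcingVersion) {
                throw new IllegalArgumentException("Cannot redefine built-in Similarity [" + name + "]");
            }
            // older indices fall through: their existing custom provider deliberately wins
        }
    }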
diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java index 330787a68a3..c15d2cfcdbe 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java @@ -32,9 +32,9 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; diff --git a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index 06bc6a84a88..584b98cff33 100644 --- a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -36,7 +36,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; @@ -61,8 +61,9 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim return SimpleFSLockFactory.INSTANCE; default: throw new IllegalArgumentException("unrecognized [index.store.fs.fs_lock] \"" + s + "\": must be native or simple"); - } - }, false, Setting.Scope.INDEX); + } // can be set on both node and index level; some nodes might be running on NFS, so they might need simple rather than native locks + }, Property.IndexScope, Property.NodeScope); + private final CounterMetric rateLimitingTimeInNanos = new CounterMetric(); private final ShardPath path; @@ -108,7 +109,8 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - final String storeType = indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.DEFAULT.getSettingsKey()); + final String storeType = indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), + IndexModule.Type.DEFAULT.getSettingsKey()); if (IndexModule.Type.FS.match(storeType) || IndexModule.Type.DEFAULT.match(storeType)) { final FSDirectory open = FSDirectory.open(location, lockFactory); // use lucene defaults if (open instanceof MMapDirectory && Constants.WINDOWS == false) { diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStore.java b/core/src/main/java/org/elasticsearch/index/store/IndexStore.java index e98ad7cc6eb..9e01d871765 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStore.java +++ b/core/src/main/java/org/elasticsearch/index/store/IndexStore.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.store; import org.apache.lucene.store.StoreRateLimiting; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; @@ -29,8 +30,12 @@ import org.elasticsearch.index.shard.ShardPath; * */ public class IndexStore extends AbstractIndexComponent { - public static final Setting<IndexRateLimitingType> INDEX_STORE_THROTTLE_TYPE_SETTING = new Setting<>("index.store.throttle.type", "none", IndexRateLimitingType::fromString, true, Setting.Scope.INDEX) ; - public static final Setting<ByteSizeValue> INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("index.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.INDEX); + public static final Setting<IndexRateLimitingType> INDEX_STORE_THROTTLE_TYPE_SETTING = + new Setting<>("index.store.throttle.type", "none", IndexRateLimitingType::fromString, + Property.Dynamic, Property.IndexScope); + public static final Setting<ByteSizeValue> INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = + Setting.byteSizeSetting("index.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), + Property.Dynamic, Property.IndexScope); protected final IndexStoreConfig indexStoreConfig; private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java
b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java index ab7075afa5b..12558bb9554 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java +++ b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java @@ -22,6 +22,7 @@ import org.apache.lucene.store.StoreRateLimiting; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -36,11 +37,15 @@ public class IndexStoreConfig { /** * Configures the node / cluster level throttle type. See {@link StoreRateLimiting.Type}. */ - public static final Setting INDICES_STORE_THROTTLE_TYPE_SETTING = new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(),StoreRateLimiting.Type::fromString, true, Setting.Scope.CLUSTER); + public static final Setting INDICES_STORE_THROTTLE_TYPE_SETTING = + new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(),StoreRateLimiting.Type::fromString, + Property.Dynamic, Property.NodeScope); /** * Configures the node / cluster level throttle intensity. The default is 10240 MB */ - public static final Setting INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = + Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), + Property.Dynamic, Property.NodeScope); private volatile StoreRateLimiting.Type rateLimitingType; private volatile ByteSizeValue rateLimitingThrottle; private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index 77e7f32f5f5..e0ed3bc98b7 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -49,7 +49,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -61,6 +60,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.Callback; @@ -90,7 +90,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.zip.Adler32; import java.util.zip.CRC32; import java.util.zip.Checksum; @@ -124,7 +123,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref static final int VERSION_START = 0; static final int VERSION = VERSION_WRITE_THROWABLE; static 
final String CORRUPTED = "corrupted_"; - public static final Setting INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.store.stats_refresh_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.INDEX); + public static final Setting INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING = + Setting.timeSetting("index.store.stats_refresh_interval", TimeValue.timeValueSeconds(10), Property.IndexScope); private final AtomicBoolean isClosed = new AtomicBoolean(false); private final StoreDirectory directory; @@ -379,7 +379,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref if (isClosed.compareAndSet(false, true)) { // only do this once! decRef(); - logger.debug("store reference count on close: " + refCounter.refCount()); + logger.debug("store reference count on close: {}", refCounter.refCount()); } } diff --git a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java index cfd5dc8f066..9f712c77e70 100644 --- a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java +++ b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java @@ -36,7 +36,7 @@ import java.util.Objects; */ public class StoreFileMetaData implements Writeable { - public static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_4_8_0; + public static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_5_0_0; private final String name; diff --git a/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java b/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java index cd0f94567f3..54ba8638eb2 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java @@ -22,7 +22,6 @@ import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; import org.apache.lucene.store.InputStreamDataInput; -import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.Channels; import java.io.IOException; @@ -36,9 +35,9 @@ import java.nio.file.Path; */ class Checkpoint { - static final int BUFFER_SIZE = RamUsageEstimator.NUM_BYTES_INT // ops - + RamUsageEstimator.NUM_BYTES_LONG // offset - + RamUsageEstimator.NUM_BYTES_LONG;// generation + static final int BUFFER_SIZE = Integer.BYTES // ops + + Long.BYTES // offset + + Long.BYTES;// generation final long offset; final int numOps; final long generation; diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 5a4438f426d..31b8db03141 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -418,10 +418,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC try { final BufferedChecksumStreamOutput checksumStreamOutput = new BufferedChecksumStreamOutput(out); final long start = out.position(); - out.skip(RamUsageEstimator.NUM_BYTES_INT); + out.skip(Integer.BYTES); writeOperationNoSize(checksumStreamOutput, operation); final long end = out.position(); - final int operationSize = (int) (end - RamUsageEstimator.NUM_BYTES_INT - start); + final int operationSize = (int) (end - Integer.BYTES - start); out.seek(start); out.writeInt(operationSize); out.seek(end); @@ -636,7 +636,7 @@ public class 
Translog extends AbstractIndexShardComponent implements IndexShardC @Override public long ramBytesUsed() { - return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2 * RamUsageEstimator.NUM_BYTES_LONG + RamUsageEstimator.NUM_BYTES_INT; + return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2 * Long.BYTES + Integer.BYTES; } @Override @@ -1144,10 +1144,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC for (Operation op : toWrite) { out.reset(); final long start = out.position(); - out.skip(RamUsageEstimator.NUM_BYTES_INT); + out.skip(Integer.BYTES); writeOperationNoSize(checksumStreamOutput, op); long end = out.position(); - int operationSize = (int) (out.position() - RamUsageEstimator.NUM_BYTES_INT - start); + int operationSize = (int) (out.position() - Integer.BYTES - start); out.seek(start); out.writeInt(operationSize); out.seek(end);
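The Translog hunks above swap Lucene's RamUsageEstimator.NUM_BYTES_* constants for the JDK's Integer.BYTES and Long.BYTES but keep the same framing idiom: reserve Integer.BYTES for a size prefix, write the operation, then seek back and patch the size in. A self-contained sketch of that idiom, with ByteBuffer standing in for the translog's seekable stream output:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    final class LengthPrefixedFrame {
        /** Writes [size:int][payload] by reserving the prefix and backpatching it. */
        static ByteBuffer writeFrame(byte[] payload) {
            ByteBuffer out = ByteBuffer.allocate(Integer.BYTES + payload.length);
            final int start = out.position();
            out.position(start + Integer.BYTES);      // out.skip(Integer.BYTES)
            out.put(payload);                         // writeOperationNoSize(...)
            final int end = out.position();
            final int operationSize = end - Integer.BYTES - start;
            out.putInt(start, operationSize);         // out.seek(start); out.writeInt(size)
            out.position(end);                        // out.seek(end)
            return out;
        }

        public static void main(String[] args) {
            ByteBuffer frame = writeFrame("op".getBytes(StandardCharsets.UTF_8));
            frame.flip();
            System.out.println("size prefix = " + frame.getInt()); // prints 2
        }
    }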
diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java index ecc3822361c..fcb3daea796 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.InputStreamDataInput; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.Channels; import org.elasticsearch.common.io.stream.InputStreamStreamInput; @@ -116,7 +115,7 @@ public class TranslogReader extends BaseTranslogReader implements Closeable { if (uuidBytes.bytesEquals(ref) == false) { throw new TranslogCorruptedException("expected shard UUID [" + uuidBytes + "] but got: [" + ref + "] this translog file belongs to a different translog. path:" + path); } - return new TranslogReader(checkpoint.generation, channel, path, ref.length + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC) + RamUsageEstimator.NUM_BYTES_INT, checkpoint.offset, checkpoint.numOps); + return new TranslogReader(checkpoint.generation, channel, path, ref.length + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC) + Integer.BYTES, checkpoint.offset, checkpoint.numOps); default: throw new TranslogCorruptedException("No known translog stream version: " + version + " path:" + path); } diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index a1fc708ddaf..e215669761c 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -24,7 +24,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.OutputStreamDataOutput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; -import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Channels; import org.elasticsearch.common.unit.ByteSizeValue; @@ -76,7 +75,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { } private static int getHeaderLength(int uuidLength) { - return CodecUtil.headerLength(TRANSLOG_CODEC) + uuidLength + RamUsageEstimator.NUM_BYTES_INT; + return CodecUtil.headerLength(TRANSLOG_CODEC) + uuidLength + Integer.BYTES; } public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, ChannelFactory channelFactory, ByteSizeValue bufferSize) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java index 926ff482248..bd01e7f0183 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java @@ -30,10 +30,9 @@ import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.ShardCoreKeyMap; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.cache.query.QueryCacheStats; @@ -50,9 +49,9 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache, Closeable { public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE_SETTING = Setting.byteSizeSetting( - "indices.queries.cache.size", "10%", false, Scope.CLUSTER); + "indices.queries.cache.size", "10%", Property.NodeScope); public static final Setting<Integer> INDICES_CACHE_QUERY_COUNT_SETTING = Setting.intSetting( - "indices.queries.cache.count", 10000, 1, false, Scope.CLUSTER); + "indices.queries.cache.count", 10000, 1, Property.NodeScope); private final LRUQueryCache cache; private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap(); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java
b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java index 575153c8ada..4b4aa4e8df2 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -68,12 +69,12 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo * A setting to enable or disable request caching on an index level. Its dynamic by default * since we are checking on the cluster state IndexMetaData always. */ - public static final Setting INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting("index.requests.cache.enable", - false, true, Setting.Scope.INDEX); - public static final Setting INDICES_CACHE_QUERY_SIZE = Setting.byteSizeSetting("indices.requests.cache.size", "1%", - false, Setting.Scope.CLUSTER); - public static final Setting INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting("indices.requests.cache.expire", - new TimeValue(0), false, Setting.Scope.CLUSTER); + public static final Setting INDEX_CACHE_REQUEST_ENABLED_SETTING = + Setting.boolSetting("index.requests.cache.enable", true, Property.Dynamic, Property.IndexScope); + public static final Setting INDICES_CACHE_QUERY_SIZE = + Setting.byteSizeSetting("indices.requests.cache.size", "1%", Property.NodeScope); + public static final Setting INDICES_CACHE_QUERY_EXPIRE = + Setting.positiveTimeSetting("indices.requests.cache.expire", new TimeValue(0), Property.NodeScope); private final ConcurrentMap registeredClosedListeners = ConcurrentCollections.newConcurrentMap(); private final Set keysToClean = ConcurrentCollections.newConcurrentSet(); @@ -228,7 +229,7 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo @Override public long ramBytesUsed() { - return RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_LONG + value.length(); + return RamUsageEstimator.NUM_BYTES_OBJECT_REF + Long.BYTES + value.length(); } @Override diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 440a11a1904..b43d33b1bd9 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -30,10 +30,10 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesReference; @@ -47,6 +47,7 @@ import 
org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -92,7 +93,6 @@ import java.nio.file.Files; import java.util.ArrayList; import java.util.EnumSet; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -103,6 +103,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Predicate; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -115,7 +116,8 @@ import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; public class IndicesService extends AbstractLifecycleComponent implements Iterable, IndexService.ShardStoreDeleter { public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout"; - public static final Setting INDICES_CACHE_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.cache.cleanup_interval", TimeValue.timeValueMinutes(1), false, Setting.Scope.CLUSTER); + public static final Setting INDICES_CACHE_CLEAN_INTERVAL_SETTING = + Setting.positiveTimeSetting("indices.cache.cleanup_interval", TimeValue.timeValueMinutes(1), Property.NodeScope); private final PluginsService pluginsService; private final NodeEnvironment nodeEnv; private final TimeValue shardsClosedTimeout; @@ -185,14 +187,14 @@ public class IndicesService extends AbstractLifecycleComponent i ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory("indices_shutdown")); // Copy indices because we modify it asynchronously in the body of the loop - Set indices = new HashSet<>(this.indices.keySet()); + final Set indices = this.indices.values().stream().map(s -> s.index()).collect(Collectors.toSet()); final CountDownLatch latch = new CountDownLatch(indices.size()); - for (final String index : indices) { + for (final Index index : indices) { indicesStopExecutor.execute(() -> { try { removeIndex(index, "shutdown", false); } catch (Throwable e) { - logger.warn("failed to remove index on stop [" + index + "]", e); + logger.warn("failed to remove index on stop [{}]", e, index); } finally { latch.countDown(); } @@ -256,13 +258,13 @@ public class IndicesService extends AbstractLifecycleComponent i } Map> statsByShard = new HashMap<>(); - for (IndexService indexService : indices.values()) { + for (IndexService indexService : this) { for (IndexShard indexShard : indexService) { try { if (indexShard.routingEntry() == null) { continue; } - IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesQueryCache, indexShard, flags), indexShard.commitStats()) }); + IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesQueryCache, indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats()) }); if 
(!statsByShard.containsKey(indexService.index())) { statsByShard.put(indexService.index(), arrayAsArrayList(indexShardStats)); } else { @@ -290,17 +292,8 @@ public class IndicesService extends AbstractLifecycleComponent i return indices.values().iterator(); } - public boolean hasIndex(String index) { - return indices.containsKey(index); - } - - /** - * Returns an IndexService for the specified index if exists otherwise returns null. - * - */ - @Nullable - public IndexService indexService(String index) { - return indices.get(index); + public boolean hasIndex(Index index) { + return indices.containsKey(index.getUUID()); } /** @@ -309,33 +302,21 @@ public class IndicesService extends AbstractLifecycleComponent i */ @Nullable public IndexService indexService(Index index) { - return indexService(index.getName()); - } - - /** - * Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown. - */ - public IndexService indexServiceSafe(String index) { - IndexService indexService = indexService(index); - if (indexService == null) { - throw new IndexNotFoundException(index); - } - return indexService; + return indices.get(index.getUUID()); } /** * Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown. */ public IndexService indexServiceSafe(Index index) { - IndexService indexService = indexServiceSafe(index.getName()); - if (indexService.indexUUID().equals(index.getUUID()) == false) { + IndexService indexService = indices.get(index.getUUID()); + if (indexService == null) { throw new IndexNotFoundException(index); } + assert indexService.indexUUID().equals(index.getUUID()) : "uuid mismatch local: " + indexService.indexUUID() + " incoming: " + index.getUUID(); return indexService; } - - /** * Creates a new {@link IndexService} for the given metadata. 
* @param indexMetaData the index metadata to create the index for @@ -346,10 +327,13 @@ public class IndicesService extends AbstractLifecycleComponent i if (!lifecycle.started()) { throw new IllegalStateException("Can't create an index [" + indexMetaData.getIndex() + "], node is closed"); } + if (indexMetaData.getIndexUUID().equals(IndexMetaData.INDEX_UUID_NA_VALUE)) { + throw new IllegalArgumentException("index must have a real UUID found value: [" + indexMetaData.getIndexUUID() + "]"); + } final Index index = indexMetaData.getIndex(); final Predicate indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(index.getName(), indexExpression, clusterService.state()); final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexNameMatcher, indexScopeSetting); - if (indices.containsKey(index.getName())) { + if (hasIndex(index)) { throw new IndexAlreadyExistsException(index); } logger.debug("creating Index [{}], shards [{}]/[{}{}]", @@ -378,7 +362,7 @@ public class IndicesService extends AbstractLifecycleComponent i try { assert indexService.getIndexEventListener() == listener; listener.afterIndexCreated(indexService); - indices = newMapBuilder(indices).put(index.getName(), indexService).immutableMap(); + indices = newMapBuilder(indices).put(index.getUUID(), indexService).immutableMap(); success = true; return indexService; } finally { @@ -395,22 +379,24 @@ public class IndicesService extends AbstractLifecycleComponent i * @param index the index to remove * @param reason the high level reason causing this removal */ - public void removeIndex(String index, String reason) { + public void removeIndex(Index index, String reason) { removeIndex(index, reason, false); } - private void removeIndex(String index, String reason, boolean delete) { + private void removeIndex(Index index, String reason, boolean delete) { + final String indexName = index.getName(); try { final IndexService indexService; final IndexEventListener listener; synchronized (this) { - if (indices.containsKey(index) == false) { + if (hasIndex(index) == false) { return; } - logger.debug("[{}] closing ... (reason [{}])", index, reason); + logger.debug("[{}] closing ... (reason [{}])", indexName, reason); Map newIndices = new HashMap<>(indices); - indexService = newIndices.remove(index); + indexService = newIndices.remove(index.getUUID()); + assert indexService != null : "IndexService is null for index: " + index; indices = unmodifiableMap(newIndices); listener = indexService.getIndexEventListener(); } @@ -419,9 +405,9 @@ public class IndicesService extends AbstractLifecycleComponent i if (delete) { listener.beforeIndexDeleted(indexService); } - logger.debug("[{}] closing index service (reason [{}])", index, reason); + logger.debug("{} closing index service (reason [{}])", index, reason); indexService.close(reason, delete); - logger.debug("[{}] closed... (reason [{}])", index, reason); + logger.debug("{} closed... (reason [{}])", index, reason); listener.afterIndexClosed(indexService.index(), indexService.getIndexSettings().getSettings()); if (delete) { final IndexSettings indexSettings = indexService.getIndexSettings(); @@ -474,12 +460,12 @@ public class IndicesService extends AbstractLifecycleComponent i * Deletes the given index. Persistent parts of the index * like the shards files, state and transaction logs are removed once all resources are released. 
* - * Equivalent to {@link #removeIndex(String, String)} but fires + * Equivalent to {@link #removeIndex(Index, String)} but fires * different lifecycle events to ensure pending resources of this index are immediately removed. * @param index the index to delete * @param reason the high level reason causing this delete */ - public void deleteIndex(String index, String reason) throws IOException { + public void deleteIndex(Index index, String reason) throws IOException { removeIndex(index, reason, true); } @@ -505,16 +491,17 @@ public class IndicesService extends AbstractLifecycleComponent i public void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState, boolean closed) throws IOException { if (nodeEnv.hasNodeFile()) { synchronized (this) { - String indexName = metaData.getIndex().getName(); - if (indices.containsKey(indexName)) { - String localUUid = indices.get(indexName).indexUUID(); - throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]"); + Index index = metaData.getIndex(); + if (hasIndex(index)) { + String localUUid = indexService(index).indexUUID(); + throw new IllegalStateException("Can't delete index store for [" + index.getName() + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]"); } - if (clusterState.metaData().hasIndex(indexName) && (clusterState.nodes().localNode().masterNode() == true)) { + + if (clusterState.metaData().hasIndex(index.getName()) && (clusterState.nodes().localNode().masterNode() == true)) { // we do not delete the store if it is a master eligible node and the index is still in the cluster state // because we want to keep the meta data for indices around even if no shards are left here - final IndexMetaData index = clusterState.metaData().index(indexName); - throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]"); + final IndexMetaData idxMeta = clusterState.metaData().index(index.getName()); + throw new IllegalStateException("Can't delete closed index store for [" + index.getName() + "] - it's still part of the cluster state [" + idxMeta.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]"); } } final IndexSettings indexSettings = buildIndexSettings(metaData); @@ -543,7 +530,7 @@ public class IndicesService extends AbstractLifecycleComponent i } // this is a pure protection to make sure this index doesn't get re-imported as a dangling index. // we should in the future rather write a tombstone rather than wiping the metadata. - MetaDataStateFormat.deleteMetaState(nodeEnv.indexPaths(index.getName())); + MetaDataStateFormat.deleteMetaState(nodeEnv.indexPaths(index)); } } @@ -554,6 +541,7 @@ public class IndicesService extends AbstractLifecycleComponent i * @param indexSettings the shards index settings. 
* @throws IOException if an IOException occurs */ + @Override public void deleteShardStore(String reason, ShardLock lock, IndexSettings indexSettings) throws IOException { ShardId shardId = lock.getShardId(); logger.trace("{} deleting shard reason [{}]", shardId, reason); @@ -607,7 +595,7 @@ public class IndicesService extends AbstractLifecycleComponent i * @return true if the index can be deleted on this node */ public boolean canDeleteIndexContents(Index index, IndexSettings indexSettings, boolean closed) { - final IndexService indexService = this.indices.get(index.getName()); + final IndexService indexService = indexService(index); // Closed indices may be deleted, even if they are on a shared // filesystem. Since it is closed we aren't deleting it for relocation if (indexSettings.isOnSharedFilesystem() == false || closed) { @@ -634,14 +622,19 @@ public class IndicesService extends AbstractLifecycleComponent i */ public boolean canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) { assert shardId.getIndex().equals(indexSettings.getIndex()); - final IndexService indexService = this.indices.get(shardId.getIndexName()); + final IndexService indexService = indexService(shardId.getIndex()); if (indexSettings.isOnSharedFilesystem() == false) { - if (indexService != null && nodeEnv.hasNodeFile()) { - return indexService.hasShard(shardId.id()) == false; - } else if (nodeEnv.hasNodeFile()) { - if (indexSettings.hasCustomDataPath()) { + if (nodeEnv.hasNodeFile()) { + final boolean isAllocated = indexService != null && indexService.hasShard(shardId.id()); + if (isAllocated) { + return false; // we are allocated - can't delete the shard + } else if (indexSettings.hasCustomDataPath()) { + // let's see if it's on a custom path (return false if the shard path doesn't exist) + // we don't need to delete anything that is not there return Files.exists(nodeEnv.resolveCustomLocation(indexSettings, shardId)); } else { + // let's see if its path is available (return false if the shard path doesn't exist) + // we don't need to delete anything that is not there return FileSystemUtils.exists(nodeEnv.availableShardPaths(shardId)); } } @@ -661,6 +654,7 @@ public class IndicesService extends AbstractLifecycleComponent i /** * Adds a pending delete for the given index shard.
*/ + @Override public void addPendingDelete(ShardId shardId, IndexSettings settings) { if (shardId == null) { throw new IllegalArgumentException("shardId must not be null"); diff --git a/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java b/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java index 7d24d4fa897..fce4e8411db 100644 --- a/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java +++ b/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java @@ -38,7 +38,7 @@ import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.percolator.PercolateStats; +import org.elasticsearch.index.percolator.PercolatorQueryCacheStats; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; @@ -105,8 +105,8 @@ public class NodeIndicesStats implements Streamable, ToXContent { } @Nullable - public PercolateStats getPercolate() { - return stats.getPercolate(); + public PercolatorQueryCacheStats getPercolate() { + return stats.getPercolatorCache(); } @Nullable @@ -172,7 +172,7 @@ public class NodeIndicesStats implements Streamable, ToXContent { int entries = in.readVInt(); statsByShard = new HashMap<>(); for (int i = 0; i < entries; i++) { - Index index = Index.readIndex(in); + Index index = new Index(in); int indexShardListSize = in.readVInt(); List indexShardStats = new ArrayList<>(indexShardListSize); for (int j = 0; j < indexShardListSize; j++) { diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java index e73396fcd7f..20a1d341cf9 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java @@ -160,15 +160,21 @@ public final class AnalysisModule extends AbstractModule { @Override protected void configure() { try { - HunspellService service = new HunspellService(environment.settings(), environment, knownDictionaries); - AnalysisRegistry registry = new AnalysisRegistry(service, environment, charFilters, tokenFilters, tokenizers, analyzers); - bind(HunspellService.class).toInstance(service); + AnalysisRegistry registry = buildRegistry(); + bind(HunspellService.class).toInstance(registry.getHunspellService()); bind(AnalysisRegistry.class).toInstance(registry); } catch (IOException e) { throw new ElasticsearchException("failed to load hunspell service", e); } } + /** + * Builds an {@link AnalysisRegistry} from the current configuration. + */ + public AnalysisRegistry buildRegistry() throws IOException { + return new AnalysisRegistry(new HunspellService(environment.settings(), environment, knownDictionaries), environment, charFilters, tokenFilters, tokenizers, analyzers); + } + /** * AnalysisProvider is the basic factory interface for registering analysis components like: *

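A pattern that recurs throughout this diff (IndicesQueryCache, IndicesRequestCache, IndicesService, and the HunspellService and circuit breaker settings below) is the migration from the old Setting constructors, which took a boolean dynamic flag plus Setting.Scope.CLUSTER or Setting.Scope.INDEX, to Setting.Property varargs that name scope and dynamism explicitly. A sketch of the before/after shape, using only factory signatures visible in these hunks; the setting key is made up and the elasticsearch core classes are assumed on the classpath:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;

    final class ExampleSettings {
        // old style (removed in this diff):
        //   Setting.boolSetting("example.flag", false, true, Setting.Scope.CLUSTER)
        // new style: properties are explicit and combinable
        static final Setting<Boolean> EXAMPLE_FLAG =
                Setting.boolSetting("example.flag", false, Property.Dynamic, Property.NodeScope);
    }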
      diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java index f99b39ef620..75c15f09778 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java @@ -19,11 +19,14 @@ package org.elasticsearch.indices.analysis; import org.apache.lucene.analysis.hunspell.Dictionary; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -71,9 +74,12 @@ import java.util.function.Function; */ public class HunspellService extends AbstractComponent { - public final static Setting HUNSPELL_LAZY_LOAD = Setting.boolSetting("indices.analysis.hunspell.dictionary.lazy", Boolean.FALSE, false, Setting.Scope.CLUSTER); - public final static Setting HUNSPELL_IGNORE_CASE = Setting.boolSetting("indices.analysis.hunspell.dictionary.ignore_case", Boolean.FALSE, false, Setting.Scope.CLUSTER); - public final static Setting HUNSPELL_DICTIONARY_OPTIONS = Setting.groupSetting("indices.analysis.hunspell.dictionary.", false, Setting.Scope.CLUSTER); + public final static Setting HUNSPELL_LAZY_LOAD = + Setting.boolSetting("indices.analysis.hunspell.dictionary.lazy", Boolean.FALSE, Property.NodeScope); + public final static Setting HUNSPELL_IGNORE_CASE = + Setting.boolSetting("indices.analysis.hunspell.dictionary.ignore_case", Boolean.FALSE, Property.NodeScope); + public final static Setting HUNSPELL_DICTIONARY_OPTIONS = + Setting.groupSetting("indices.analysis.hunspell.dictionary.", Property.NodeScope); private final ConcurrentHashMap dictionaries = new ConcurrentHashMap<>(); private final Map knownDictionaries; private final boolean defaultIgnoreCase; @@ -183,7 +189,9 @@ public class HunspellService extends AbstractComponent { affixStream = Files.newInputStream(affixFiles[0]); - return new Dictionary(affixStream, dicStreams, ignoreCase); + try (Directory tmp = new SimpleFSDirectory(env.tmpFile())) { + return new Dictionary(tmp, "hunspell", affixStream, dicStreams, ignoreCase); + } } catch (Exception e) { logger.error("Could not load hunspell dictionary [{}]", e, locale); diff --git a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 0e1532bc6b3..d2d96092186 100644 --- a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -46,15 +47,22 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { private final 
ConcurrentMap breakers = new ConcurrentHashMap(); - public static final Setting TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.total.limit", "70%", true, Setting.Scope.CLUSTER); + public static final Setting TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = + Setting.byteSizeSetting("indices.breaker.total.limit", "70%", Property.Dynamic, Property.NodeScope); - public static final Setting FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", true, Setting.Scope.CLUSTER); - public static final Setting FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, true, Setting.Scope.CLUSTER); - public static final Setting FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("indices.breaker.fielddata.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER); + public static final Setting FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = + Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", Property.Dynamic, Property.NodeScope); + public static final Setting FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = + Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, Property.Dynamic, Property.NodeScope); + public static final Setting FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = + new Setting<>("indices.breaker.fielddata.type", "memory", CircuitBreaker.Type::parseValue, Property.NodeScope); - public static final Setting REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.request.limit", "40%", true, Setting.Scope.CLUSTER); - public static final Setting REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, true, Setting.Scope.CLUSTER); - public static final Setting REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("indices.breaker.request.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER); + public static final Setting REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = + Setting.byteSizeSetting("indices.breaker.request.limit", "40%", Property.Dynamic, Property.NodeScope); + public static final Setting REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = + Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, Property.Dynamic, Property.NodeScope); + public static final Setting REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = + new Setting<>("indices.breaker.request.type", "memory", CircuitBreaker.Type::parseValue, Property.NodeScope); diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 7998afb7656..46ead3fbf36 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -22,7 +22,6 @@ package org.elasticsearch.indices.cluster; import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction; @@ -38,6 +37,7 @@ import org.elasticsearch.cluster.routing.RestoreSource; import org.elasticsearch.cluster.routing.RoutingNodes; import 
org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.compress.CompressedXContent; @@ -46,6 +46,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexShardAlreadyExistsException; @@ -157,13 +158,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent { @Override public void handle(final IndexShard.ShardFailure shardFailure) { - final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex().getName()); + final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex()); final ShardRouting shardRouting = shardFailure.routing; threadPool.generic().execute(() -> { synchronized (mutex) { diff --git a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java index 0a3f063dfcc..46744f4d848 100644 --- a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java +++ b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.Index; @@ -52,7 +53,8 @@ import java.util.function.ToLongBiFunction; */ public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener, Releasable{ - public static final Setting INDICES_FIELDDATA_CACHE_SIZE_KEY = Setting.byteSizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); + public static final Setting INDICES_FIELDDATA_CACHE_SIZE_KEY = + Setting.byteSizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), Property.NodeScope); private final IndexFieldDataCache.Listener indicesFieldDataCacheListener; private final Cache cache; diff --git a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 6eb7c88a2a4..b1d7af7ff9c 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import 
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -39,6 +39,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; @@ -111,15 +112,15 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL */ public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener listener) { final ClusterState state = clusterService.state(); - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices); + final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices); final Map> results = ConcurrentCollections.newConcurrentMap(); int totalNumberOfShards = 0; int numberOfShards = 0; - for (String index : concreteIndices) { - final IndexMetaData indexMetaData = state.metaData().index(index); + for (Index index : concreteIndices) { + final IndexMetaData indexMetaData = state.metaData().getIndexSafe(index); totalNumberOfShards += indexMetaData.getTotalNumberOfShards(); numberOfShards += indexMetaData.getNumberOfShards(); - results.put(index, Collections.synchronizedList(new ArrayList<>())); + results.put(index.getName(), Collections.synchronizedList(new ArrayList<>())); } if (numberOfShards == 0) { @@ -129,8 +130,9 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL final int finalTotalNumberOfShards = totalNumberOfShards; final CountDown countDown = new CountDown(numberOfShards); - for (final String index : concreteIndices) { - final IndexMetaData indexMetaData = state.metaData().index(index); + for (final Index concreteIndex : concreteIndices) { + final String index = concreteIndex.getName(); + final IndexMetaData indexMetaData = state.metaData().getIndexSafe(concreteIndex); final int indexNumberOfShards = indexMetaData.getNumberOfShards(); for (int shard = 0; shard < indexNumberOfShards; shard++) { final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard); @@ -240,7 +242,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL final IndexShardRoutingTable getShardRoutingTable(ShardId shardId, ClusterState state) { final IndexRoutingTable indexRoutingTable = state.routingTable().index(shardId.getIndexName()); if (indexRoutingTable == null) { - IndexMetaData index = state.getMetaData().index(shardId.getIndexName()); + IndexMetaData index = state.getMetaData().index(shardId.getIndex()); if (index != null && index.getState() == IndexMetaData.State.CLOSE) { throw new IndexClosedException(shardId.getIndex()); } @@ -309,7 +311,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } final 
Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId()); if (expectedCommitId == null) { - logger.trace("{} can't resolve expected commit id for {}, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); + logger.trace("{} can't resolve expected commit id for current node, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush")); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); continue; diff --git a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java b/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java index a9e90884a68..b0b212d2ab4 100644 --- a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java +++ b/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java @@ -19,12 +19,12 @@ package org.elasticsearch.indices.query; -import java.util.Map; - import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryParser; +import java.util.Map; + public class IndicesQueriesRegistry extends AbstractComponent { private Map> queryParsers; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index 24f87ee436f..8494939e46d 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -238,7 +238,7 @@ public class RecoveriesCollection { return; } lastSeenAccessTime = accessTime; - logger.trace("[monitor] rescheduling check for [{}]. last access time is [{}]", lastSeenAccessTime); + logger.trace("[monitor] rescheduling check for [{}]. 
last access time is [{}]", recoveryId, lastSeenAccessTime); threadPool.schedule(checkInterval, ThreadPool.Names.GENERIC, this); } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 8d610dce05b..82595458479 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -32,31 +33,45 @@ import org.elasticsearch.common.unit.TimeValue; public class RecoverySettings extends AbstractComponent { - public static final Setting INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = + Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), + Property.Dynamic, Property.NodeScope); /** * how long to wait before retrying after issues cause by cluster state syncing between nodes * i.e., local node is not yet known on remote node, remote shard not yet started etc. */ - public static final Setting INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_state_sync", TimeValue.timeValueMillis(500), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING = + Setting.positiveTimeSetting("indices.recovery.retry_delay_state_sync", TimeValue.timeValueMillis(500), + Property.Dynamic, Property.NodeScope); /** how long to wait before retrying after network related issues */ - public static final Setting INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_network", TimeValue.timeValueSeconds(5), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING = + Setting.positiveTimeSetting("indices.recovery.retry_delay_network", TimeValue.timeValueSeconds(5), + Property.Dynamic, Property.NodeScope); /** timeout value to use for requests made as part of the recovery process */ - public static final Setting INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.recovery.internal_action_timeout", TimeValue.timeValueMinutes(15), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING = + Setting.positiveTimeSetting("indices.recovery.internal_action_timeout", TimeValue.timeValueMinutes(15), + Property.Dynamic, Property.NodeScope); /** * timeout value to use for requests made as part of the recovery process that are expected to take long time. * defaults to twice `indices.recovery.internal_action_timeout`. 
*/ - public static final Setting INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.internal_action_long_timeout", (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(), TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING = + Setting.timeSetting("indices.recovery.internal_action_long_timeout", + (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(), + TimeValue.timeValueSeconds(0), Property.Dynamic, Property.NodeScope); /** * recoveries that don't show any activity for more then this interval will be failed. * defaults to `indices.recovery.internal_action_long_timeout` */ - public static final Setting INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.recovery_activity_timeout", (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s) , TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING = + Setting.timeSetting("indices.recovery.recovery_activity_timeout", + (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s) , TimeValue.timeValueSeconds(0), + Property.Dynamic, Property.NodeScope); public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB); diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java index 9a5c23fc2e1..aaf351f6056 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java @@ -20,9 +20,9 @@ package org.elasticsearch.indices.recovery; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -83,7 +83,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe } private RecoveryResponse recover(final StartRecoveryRequest request) throws IOException { - final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex().getName()); + final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); final IndexShard shard = indexService.getShard(request.shardId().id()); // starting recovery from that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index b92e2066af2..b609eb5d08a 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -137,7 +137,7 @@ public class RecoverySourceHandler { } } - logger.trace("snapshot translog for recovery. current size is [{}]", translogView.totalOperations()); + logger.trace("{} snapshot translog for recovery. 
current size is [{}]", shard.shardId(), translogView.totalOperations()); try { phase2(translogView.snapshot()); } catch (Throwable e) { @@ -289,7 +289,7 @@ public class RecoverySourceHandler { RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + "checksums are ok", null); exception.addSuppressed(targetException); - logger.warn("{} Remote file corruption during finalization on node {}, recovering {}. local checksum OK", + logger.warn("{} Remote file corruption during finalization of recovery on node {}. local checksum OK", corruptIndexException, shard.shardId(), request.targetNode()); throw exception; } else { diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java index dcbb0c7bedf..d57cfbb98c8 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java @@ -24,10 +24,10 @@ import org.apache.lucene.store.RateLimiter; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -218,7 +218,7 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve "operations") .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]") .append("\n"); - logger.trace(sb.toString()); + logger.trace("{}", sb); } else { logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), recoveryTarget.sourceNode(), recoveryTime); } diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index d0aec817ee9..d2db41a7a0c 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -21,21 +21,23 @@ package org.elasticsearch.indices.store; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; +import 
org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; @@ -57,7 +59,6 @@ import org.elasticsearch.transport.TransportService; import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.EnumSet; import java.util.List; import java.util.concurrent.TimeUnit; @@ -69,7 +70,9 @@ import java.util.concurrent.atomic.AtomicInteger; public class IndicesStore extends AbstractComponent implements ClusterStateListener, Closeable { // TODO this class can be foled into either IndicesService and partially into IndicesClusterStateService there is no need for a separate public service - public static final Setting INDICES_STORE_DELETE_SHARD_TIMEOUT = Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER); + public static final Setting INDICES_STORE_DELETE_SHARD_TIMEOUT = + Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), + Property.NodeScope); public static final String ACTION_SHARD_EXISTS = "internal:index/shard/exists"; private static final EnumSet ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED); private final IndicesService indicesService; @@ -113,7 +116,13 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe if (shardCanBeDeleted(event.state(), indexShardRoutingTable)) { ShardId shardId = indexShardRoutingTable.shardId(); IndexService indexService = indicesService.indexService(indexRoutingTable.getIndex()); - IndexSettings indexSettings = indexService != null ? 
indexService.getIndexSettings() : new IndexSettings(event.state().getMetaData().index(indexRoutingTable.getIndex()), settings); + final IndexSettings indexSettings; + if (indexService == null) { + IndexMetaData indexMetaData = event.state().getMetaData().getIndexSafe(indexRoutingTable.getIndex()); + indexSettings = new IndexSettings(indexMetaData, settings); + } else { + indexSettings = indexService.getIndexSettings(); + } if (indicesService.canDeleteShardContent(shardId, indexSettings)) { deleteShardIfExistElseWhere(event.state(), indexShardRoutingTable); } @@ -162,7 +171,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe private void deleteShardIfExistElseWhere(ClusterState state, IndexShardRoutingTable indexShardRoutingTable) { List> requests = new ArrayList<>(indexShardRoutingTable.size()); - String indexUUID = state.getMetaData().index(indexShardRoutingTable.shardId().getIndex()).getIndexUUID(); + String indexUUID = indexShardRoutingTable.shardId().getIndex().getUUID(); ClusterName clusterName = state.getClusterName(); for (ShardRouting shardRouting : indexShardRoutingTable) { // Node can't be null, because otherwise shardCanBeDeleted() would have returned false @@ -348,7 +357,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe return null; } ShardId shardId = request.shardId; - IndexService indexService = indicesService.indexService(shardId.getIndexName()); + IndexService indexService = indicesService.indexService(shardId.getIndex()); if (indexService != null && indexService.indexUUID().equals(request.indexUUID)) { return indexService.getShardOrNull(shardId.id()); } diff --git a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index bcc2d7f74c4..35a34ebea1b 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -29,11 +29,11 @@ import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -126,7 +126,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction { - public static final Setting INDICES_TTL_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_TTL_INTERVAL_SETTING = + Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), + Property.Dynamic, Property.NodeScope); private final ClusterService clusterService; private final IndicesService indicesService; @@ -159,7 +162,7 @@ public class IndicesTTLService extends AbstractLifecycleComponent processorFactories = 
pipelineStore.getProcessorRegistry().getProcessorFactories(); + List processorInfoList = new ArrayList<>(processorFactories.size()); + for (Map.Entry entry : processorFactories.entrySet()) { + processorInfoList.add(new ProcessorInfo(entry.getKey())); + } + return new IngestInfo(processorInfoList); + } + @Override public void close() throws IOException { pipelineStore.close(); diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestStats.java b/core/src/main/java/org/elasticsearch/ingest/IngestStats.java new file mode 100644 index 00000000000..9ccc4126763 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/ingest/IngestStats.java @@ -0,0 +1,169 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +public class IngestStats implements Writeable<IngestStats>, ToXContent { + private final Stats totalStats; + private final Map<String, Stats> statsPerPipeline; + + public IngestStats(StreamInput in) throws IOException { + this.totalStats = new Stats(in); + int size = in.readVInt(); + this.statsPerPipeline = new HashMap<>(size); + for (int i = 0; i < size; i++) { + statsPerPipeline.put(in.readString(), new Stats(in)); + } + } + + public IngestStats(Stats totalStats, Map<String, Stats> statsPerPipeline) { + this.totalStats = totalStats; + this.statsPerPipeline = statsPerPipeline; + } + + /** + * @return The accumulated stats for all pipelines + */ + public Stats getTotalStats() { + return totalStats; + } + + /** + * @return The stats on a per pipeline basis + */ + public Map<String, Stats> getStatsPerPipeline() { + return statsPerPipeline; + } + + @Override + public IngestStats readFrom(StreamInput in) throws IOException { + return new IngestStats(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + totalStats.writeTo(out); + out.writeVInt(statsPerPipeline.size()); + for (Map.Entry<String, Stats> entry : statsPerPipeline.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("ingest"); + builder.startObject("total"); + totalStats.toXContent(builder, params); + builder.endObject(); + builder.startObject("pipelines"); + for (Map.Entry<String, Stats> entry : statsPerPipeline.entrySet()) { + builder.startObject(entry.getKey()); + entry.getValue().toXContent(builder, params); + builder.endObject(); + } +
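
An aside for readers new to this codebase: `IngestStats` above follows the Writeable convention of pairing a stream-reading constructor with a `writeTo(StreamOutput)` method. The sketch below is a hypothetical, minimal illustration of the one invariant that convention depends on (`ExampleCounters` is an invented name, not part of this patch): every write must be mirrored by the matching read, in the same order and with the same width, e.g. a collection size written with `writeVInt` must be read back with `readVInt`.

```java
// Hypothetical sketch (not part of this patch) of the Writeable pattern
// that IngestStats and IngestStats.Stats follow: the stream constructor
// must read back exactly what writeTo wrote, in the same order.
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class ExampleCounters {

    private final Map<String, Long> counters;

    public ExampleCounters(StreamInput in) throws IOException {
        int size = in.readVInt();              // mirrors writeVInt(size) below
        this.counters = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            counters.put(in.readString(), in.readVLong());
        }
    }

    public ExampleCounters(Map<String, Long> counters) {
        this.counters = counters;
    }

    public void writeTo(StreamOutput out) throws IOException {
        out.writeVInt(counters.size());        // mirrors readVInt() above
        for (Map.Entry<String, Long> entry : counters.entrySet()) {
            out.writeString(entry.getKey());   // mirrors readString()
            out.writeVLong(entry.getValue());  // mirrors readVLong()
        }
    }
}
```
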
+ builder.endObject(); + builder.endObject(); + return builder; + } + + public static class Stats implements Writeable<Stats>, ToXContent { + + private final long ingestCount; + private final long ingestTimeInMillis; + private final long ingestCurrent; + private final long ingestFailedCount; + + public Stats(StreamInput in) throws IOException { + ingestCount = in.readVLong(); + ingestTimeInMillis = in.readVLong(); + ingestCurrent = in.readVLong(); + ingestFailedCount = in.readVLong(); + } + + public Stats(long ingestCount, long ingestTimeInMillis, long ingestCurrent, long ingestFailedCount) { + this.ingestCount = ingestCount; + this.ingestTimeInMillis = ingestTimeInMillis; + this.ingestCurrent = ingestCurrent; + this.ingestFailedCount = ingestFailedCount; + } + + /** + * @return The total number of executed ingest preprocessing operations. + */ + public long getIngestCount() { + return ingestCount; + } + + /** + * @return The total time spent on ingest preprocessing in millis. + */ + public long getIngestTimeInMillis() { + return ingestTimeInMillis; + } + + /** + * @return The total number of ingest preprocessing operations currently executing. + */ + public long getIngestCurrent() { + return ingestCurrent; + } + + /** + * @return The total number of ingest preprocessing operations that have failed. + */ + public long getIngestFailedCount() { + return ingestFailedCount; + } + + @Override + public Stats readFrom(StreamInput in) throws IOException { + return new Stats(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(ingestCount); + out.writeVLong(ingestTimeInMillis); + out.writeVLong(ingestCurrent); + out.writeVLong(ingestFailedCount); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("count", ingestCount); + builder.timeValueField("time_in_millis", "time", ingestTimeInMillis, TimeUnit.MILLISECONDS); + builder.field("current", ingestCurrent); + builder.field("failed", ingestFailedCount); + return builder; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index 3f0de550782..94c79db30a0 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -19,23 +19,36 @@ package org.elasticsearch.ingest; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.ingest.core.IngestDocument; import org.elasticsearch.ingest.core.Pipeline; import org.elasticsearch.threadpool.ThreadPool; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; import java.util.Map; +import java.util.Optional; +import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Consumer; -public class PipelineExecutionService { +public class PipelineExecutionService implements ClusterStateListener { private final PipelineStore store; private final
ThreadPool threadPool; + private final StatsHolder totalStats = new StatsHolder(); + private volatile Map<String, StatsHolder> statsHolderPerPipeline = Collections.emptyMap(); + public PipelineExecutionService(PipelineStore store, ThreadPool threadPool) { this.store = store; this.threadPool = threadPool; @@ -89,29 +102,85 @@ public class PipelineExecutionService { }); } - private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception { - String index = indexRequest.index(); - String type = indexRequest.type(); - String id = indexRequest.id(); - String routing = indexRequest.routing(); - String parent = indexRequest.parent(); - String timestamp = indexRequest.timestamp(); - String ttl = indexRequest.ttl() == null ? null : indexRequest.ttl().toString(); - Map<String, Object> sourceAsMap = indexRequest.sourceAsMap(); - IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, parent, timestamp, ttl, sourceAsMap); - pipeline.execute(ingestDocument); + public IngestStats stats() { + Map<String, StatsHolder> statsHolderPerPipeline = this.statsHolderPerPipeline; - Map<IngestDocument.MetaData, String> metadataMap = ingestDocument.extractMetadata(); - //it's fine to set all metadata fields all the time, as ingest document holds their starting values - //before ingestion, which might also get modified during ingestion. - indexRequest.index(metadataMap.get(IngestDocument.MetaData.INDEX)); - indexRequest.type(metadataMap.get(IngestDocument.MetaData.TYPE)); - indexRequest.id(metadataMap.get(IngestDocument.MetaData.ID)); - indexRequest.routing(metadataMap.get(IngestDocument.MetaData.ROUTING)); - indexRequest.parent(metadataMap.get(IngestDocument.MetaData.PARENT)); - indexRequest.timestamp(metadataMap.get(IngestDocument.MetaData.TIMESTAMP)); - indexRequest.ttl(metadataMap.get(IngestDocument.MetaData.TTL)); - indexRequest.source(ingestDocument.getSourceAndMetadata()); + Map<String, IngestStats.Stats> statsPerPipeline = new HashMap<>(statsHolderPerPipeline.size()); + for (Map.Entry<String, StatsHolder> entry : statsHolderPerPipeline.entrySet()) { + statsPerPipeline.put(entry.getKey(), entry.getValue().createStats()); + } + + return new IngestStats(totalStats.createStats(), statsPerPipeline); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + IngestMetadata ingestMetadata = event.state().getMetaData().custom(IngestMetadata.TYPE); + if (ingestMetadata != null) { + updatePipelineStats(ingestMetadata); + } + } + + void updatePipelineStats(IngestMetadata ingestMetadata) { + boolean changed = false; + Map<String, StatsHolder> newStatsPerPipeline = new HashMap<>(statsHolderPerPipeline); + Iterator<String> iterator = newStatsPerPipeline.keySet().iterator(); + while (iterator.hasNext()) { + String pipeline = iterator.next(); + if (ingestMetadata.getPipelines().containsKey(pipeline) == false) { + iterator.remove(); + changed = true; + } + } + for (String pipeline : ingestMetadata.getPipelines().keySet()) { + if (newStatsPerPipeline.containsKey(pipeline) == false) { + newStatsPerPipeline.put(pipeline, new StatsHolder()); + changed = true; + } + } + + if (changed) { + statsHolderPerPipeline = Collections.unmodifiableMap(newStatsPerPipeline); + } + } + + private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception { + long startTimeInNanos = System.nanoTime(); + // the pipeline specific stat holder may not exist and that is fine: + // (e.g.
the pipeline may have been removed while we're ingesting a document) + Optional<StatsHolder> pipelineStats = Optional.ofNullable(statsHolderPerPipeline.get(pipeline.getId())); + try { + totalStats.preIngest(); + pipelineStats.ifPresent(StatsHolder::preIngest); + String index = indexRequest.index(); + String type = indexRequest.type(); + String id = indexRequest.id(); + String routing = indexRequest.routing(); + String parent = indexRequest.parent(); + String timestamp = indexRequest.timestamp(); + String ttl = indexRequest.ttl() == null ? null : indexRequest.ttl().toString(); + Map<String, Object> sourceAsMap = indexRequest.sourceAsMap(); + IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, parent, timestamp, ttl, sourceAsMap); + pipeline.execute(ingestDocument); + + Map<IngestDocument.MetaData, String> metadataMap = ingestDocument.extractMetadata(); + //it's fine to set all metadata fields all the time, as ingest document holds their starting values + //before ingestion, which might also get modified during ingestion. + indexRequest.index(metadataMap.get(IngestDocument.MetaData.INDEX)); + indexRequest.type(metadataMap.get(IngestDocument.MetaData.TYPE)); + indexRequest.id(metadataMap.get(IngestDocument.MetaData.ID)); + indexRequest.routing(metadataMap.get(IngestDocument.MetaData.ROUTING)); + indexRequest.parent(metadataMap.get(IngestDocument.MetaData.PARENT)); + indexRequest.timestamp(metadataMap.get(IngestDocument.MetaData.TIMESTAMP)); + indexRequest.ttl(metadataMap.get(IngestDocument.MetaData.TTL)); + indexRequest.source(ingestDocument.getSourceAndMetadata()); + } catch (Exception e) { + totalStats.ingestFailed(); + pipelineStats.ifPresent(StatsHolder::ingestFailed); + throw e; + } finally { + long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos); + totalStats.postIngest(ingestTimeInMillis); + pipelineStats.ifPresent(statsHolder -> statsHolder.postIngest(ingestTimeInMillis)); + } } private Pipeline getPipeline(String pipelineId) { @@ -121,4 +190,30 @@ public class PipelineExecutionService { } return pipeline; } + + static class StatsHolder { + + private final MeanMetric ingestMetric = new MeanMetric(); + private final CounterMetric ingestCurrent = new CounterMetric(); + private final CounterMetric ingestFailed = new CounterMetric(); + + void preIngest() { + ingestCurrent.inc(); + } + + void postIngest(long ingestTimeInMillis) { + ingestCurrent.dec(); + ingestMetric.inc(ingestTimeInMillis); + } + + void ingestFailed() { + ingestFailed.inc(); + } + + IngestStats.Stats createStats() { + return new IngestStats.Stats(ingestMetric.count(), ingestMetric.sum(), ingestCurrent.count(), ingestFailed.count()); + } + + } + } diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java b/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java index 3999f357b86..7e0dc1b4ffa 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java @@ -20,6 +20,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ingest.DeletePipelineRequest; @@ -27,14 +28,16 @@ import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.ingest.WritePipelineResponse; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent;
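
As context for the metering that `innerExecute` and `StatsHolder` introduce above, the pattern is: bump an in-flight counter before the work, record the failure in `catch`, and release the counter and record elapsed time in `finally`, so failed documents are still timed and the in-flight gauge can never leak. A minimal standalone sketch of that pattern, assuming only the `CounterMetric` and `MeanMetric` classes already used above (`MeteredRunner` is a hypothetical name, not part of this patch):

```java
// Hypothetical illustration of the metering pattern used by innerExecute:
// "current" is incremented before the work and always decremented in
// finally; elapsed time is recorded in finally so failures are timed too.
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;

import java.util.concurrent.TimeUnit;

public class MeteredRunner {

    private final MeanMetric timeMetric = new MeanMetric();     // count + total millis
    private final CounterMetric current = new CounterMetric();  // in-flight operations
    private final CounterMetric failed = new CounterMetric();   // failed operations

    public void run(Runnable work) {
        long startNanos = System.nanoTime();
        current.inc();
        try {
            work.run();
        } catch (RuntimeException e) {
            failed.inc();
            throw e;
        } finally {
            current.dec();
            timeMetric.inc(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
        }
    }
}
```
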
-import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.ingest.core.IngestInfo; import org.elasticsearch.ingest.core.Pipeline; import org.elasticsearch.ingest.core.Processor; import org.elasticsearch.ingest.core.TemplateService; @@ -130,8 +133,8 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust pipelines.remove(request.getId()); ClusterState.Builder newState = ClusterState.builder(currentState); newState.metaData(MetaData.builder(currentState.getMetaData()) - .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelines)) - .build()); + .putCustom(IngestMetadata.TYPE, new IngestMetadata(pipelines)) + .build()); return newState.build(); } } @@ -139,15 +142,9 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust /** * Stores the specified pipeline definition in the request. */ - public void put(ClusterService clusterService, PutPipelineRequest request, ActionListener listener) { + public void put(ClusterService clusterService, Map ingestInfos, PutPipelineRequest request, ActionListener listener) throws Exception { // validates the pipeline and processor configuration before submitting a cluster update task: - Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false).v2(); - try { - factory.create(request.getId(), pipelineConfig, processorRegistry); - } catch(Exception e) { - listener.onFailure(e); - return; - } + validatePipeline(ingestInfos, request); clusterService.submitStateUpdateTask("put-pipeline-" + request.getId(), new AckedClusterStateUpdateTask(request, listener) { @Override @@ -162,6 +159,25 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust }); } + void validatePipeline(Map ingestInfos, PutPipelineRequest request) throws Exception { + if (ingestInfos.isEmpty()) { + throw new IllegalStateException("Ingest info is empty"); + } + + Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false).v2(); + Pipeline pipeline = factory.create(request.getId(), pipelineConfig, processorRegistry); + List exceptions = new ArrayList<>(); + for (Processor processor : pipeline.flattenAllProcessors()) { + for (Map.Entry entry : ingestInfos.entrySet()) { + if (entry.getValue().containsProcessor(processor.getType()) == false) { + String message = "Processor type [" + processor.getType() + "] is not installed on node [" + entry.getKey() + "]"; + exceptions.add(new IllegalArgumentException(message)); + } + } + } + ExceptionsHelper.rethrowAndSuppress(exceptions); + } + ClusterState innerPut(PutPipelineRequest request, ClusterState currentState) { IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); Map pipelines; diff --git a/core/src/main/java/org/elasticsearch/ingest/ProcessorsRegistry.java b/core/src/main/java/org/elasticsearch/ingest/ProcessorsRegistry.java index bd885c578b3..e831d70702e 100644 --- a/core/src/main/java/org/elasticsearch/ingest/ProcessorsRegistry.java +++ b/core/src/main/java/org/elasticsearch/ingest/ProcessorsRegistry.java @@ 
-21,6 +21,7 @@ package org.elasticsearch.ingest; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ingest.core.Processor; +import org.elasticsearch.ingest.core.ProcessorInfo; import org.elasticsearch.ingest.core.TemplateService; import java.io.Closeable; diff --git a/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java b/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java index c784ea1c57a..ddf3781d1a6 100644 --- a/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java +++ b/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java @@ -20,6 +20,9 @@ package org.elasticsearch.ingest.core; +import org.elasticsearch.common.util.iterable.Iterables; + +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -56,6 +59,24 @@ public class CompoundProcessor implements Processor { return processors; } + public List flattenProcessors() { + List allProcessors = new ArrayList<>(flattenProcessors(processors)); + allProcessors.addAll(flattenProcessors(onFailureProcessors)); + return allProcessors; + } + + private static List flattenProcessors(List processors) { + List flattened = new ArrayList<>(); + for (Processor processor : processors) { + if (processor instanceof CompoundProcessor) { + flattened.addAll(((CompoundProcessor) processor).flattenProcessors()); + } else { + flattened.add(processor); + } + } + return flattened; + } + @Override public String getType() { return "compound"; diff --git a/core/src/main/java/org/elasticsearch/ingest/core/IngestInfo.java b/core/src/main/java/org/elasticsearch/ingest/core/IngestInfo.java new file mode 100644 index 00000000000..d128732203f --- /dev/null +++ b/core/src/main/java/org/elasticsearch/ingest/core/IngestInfo.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.ingest.core; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; + +public class IngestInfo implements Writeable<IngestInfo>, ToXContent { + + private final Set<ProcessorInfo> processors; + + public IngestInfo(StreamInput in) throws IOException { + this(Collections.emptyList()); + final int size = in.readVInt(); + for (int i = 0; i < size; i++) { + processors.add(new ProcessorInfo(in)); + } + } + + public IngestInfo(List<ProcessorInfo> processors) { + this.processors = new TreeSet<>(processors); // we use a treeset here to have a test-able / predictable order + } + + public Iterable<ProcessorInfo> getProcessors() { + return processors; + } + + public boolean containsProcessor(String type) { + return processors.contains(new ProcessorInfo(type)); + } + + @Override + public IngestInfo readFrom(StreamInput in) throws IOException { + return new IngestInfo(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(processors.size()); + for (ProcessorInfo info : processors) { + info.writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("ingest"); + builder.startArray("processors"); + for (ProcessorInfo info : processors) { + info.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IngestInfo that = (IngestInfo) o; + return Objects.equals(processors, that.processors); + } + + @Override + public int hashCode() { + return Objects.hash(processors); + } +} diff --git a/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java b/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java index 9b887ec229c..821a44c0a96 100644 --- a/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java +++ b/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java @@ -83,6 +83,14 @@ public final class Pipeline { return compoundProcessor.getOnFailureProcessors(); } + /** + * Flattens the normal and on failure processors into a single list. The original order is lost. + * This can be useful for pipeline validation purposes. + */ + public List<Processor> flattenAllProcessors() { + return compoundProcessor.flattenProcessors(); + } + public final static class Factory { public Pipeline create(String id, Map<String, Object> config, ProcessorsRegistry processorRegistry) throws Exception { diff --git a/core/src/main/java/org/elasticsearch/ingest/core/ProcessorInfo.java b/core/src/main/java/org/elasticsearch/ingest/core/ProcessorInfo.java new file mode 100644 index 00000000000..f652b182919 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/ingest/core/ProcessorInfo.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.core; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +public class ProcessorInfo implements Writeable<ProcessorInfo>, ToXContent, Comparable<ProcessorInfo> { + + private final String type; + + public ProcessorInfo(StreamInput input) throws IOException { + type = input.readString(); + } + + public ProcessorInfo(String type) { + this.type = type; + } + + /** + * @return The unique processor type + */ + public String getType() { + return type; + } + + @Override + public ProcessorInfo readFrom(StreamInput in) throws IOException { + return new ProcessorInfo(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.type); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("type", type); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ProcessorInfo that = (ProcessorInfo) o; + + return type.equals(that.type); + + } + + @Override + public int hashCode() { + return type.hashCode(); + } + + @Override + public int compareTo(ProcessorInfo o) { + return type.compareTo(o.type); + } +} diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java index 99a78f13a07..0287d5c522c 100644 --- a/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java +++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java @@ -21,6 +21,7 @@ package org.elasticsearch.monitor.fs; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.SingleObjectCache; @@ -37,7 +38,8 @@ public class FsService extends AbstractComponent { private final SingleObjectCache<FsInfo> fsStatsCache; public final static Setting<TimeValue> REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.fs.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("monitor.fs.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); public FsService(Settings settings, NodeEnvironment nodeEnvironment) throws IOException { super(settings); diff --git
a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java index 97c813a0fe3..5a2d591c7dc 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java @@ -21,7 +21,7 @@ package org.elasticsearch.monitor.jvm; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -47,12 +47,14 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent ENABLED_SETTING = Setting.boolSetting("monitor.jvm.gc.enabled", true, false, Scope.CLUSTER); + public final static Setting ENABLED_SETTING = + Setting.boolSetting("monitor.jvm.gc.enabled", true, Property.NodeScope); public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.jvm.gc.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Scope.CLUSTER); + Setting.timeSetting("monitor.jvm.gc.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); private static String GC_COLLECTOR_PREFIX = "monitor.jvm.gc.collector."; - public final static Setting GC_SETTING = Setting.groupSetting(GC_COLLECTOR_PREFIX, false, Scope.CLUSTER); + public final static Setting GC_SETTING = Setting.groupSetting(GC_COLLECTOR_PREFIX, Property.NodeScope); static class GcThreshold { public final String name; diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java index fbec6cda168..e91c05e75ac 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java @@ -21,6 +21,7 @@ package org.elasticsearch.monitor.jvm; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -36,7 +37,8 @@ public class JvmService extends AbstractComponent { private JvmStats jvmStats; public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.jvm.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("monitor.jvm.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); public JvmService(Settings settings) { super(settings); diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java index 5f836c6f928..d452094d7b0 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java @@ -21,6 +21,7 @@ package org.elasticsearch.monitor.os; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.SingleObjectCache; @@ -38,7 +39,8 @@ public class OsService extends AbstractComponent { private SingleObjectCache osStatsCache; public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.os.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("monitor.os.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); public OsService(Settings settings) { super(settings); diff --git a/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java b/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java index 9e3283af4fc..30c24f34c66 100644 --- a/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java +++ b/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java @@ -21,6 +21,7 @@ package org.elasticsearch.monitor.process; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.SingleObjectCache; @@ -35,7 +36,8 @@ public final class ProcessService extends AbstractComponent { private final SingleObjectCache processStatsCache; public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.process.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("monitor.process.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); public ProcessService(Settings settings) { super(settings); diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index a78a08ef32a..363250e07cd 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -30,14 +30,15 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.node.NodeClientModule; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterNameModule; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.MasterNodeChangePredicate; +import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.routing.RoutingService; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleComponent; @@ -53,6 +54,7 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import 
diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index a78a08ef32a..363250e07cd 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -30,14 +30,15 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.node.NodeClientModule; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterNameModule; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.MasterNodeChangePredicate; +import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.routing.RoutingService; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleComponent; @@ -53,6 +54,7 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.transport.BoundTransportAddress; @@ -81,8 +83,6 @@ import org.elasticsearch.monitor.MonitorService; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.node.internal.InternalSettingsPreparer; import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.percolator.PercolatorModule; -import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsModule; import org.elasticsearch.plugins.PluginsService; @@ -129,16 +129,23 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder; */ public class Node implements Closeable { - public static final Setting<Boolean> WRITE_PORTS_FIELD_SETTING = Setting.boolSetting("node.portsfile", false, false, Setting.Scope.CLUSTER); - public static final Setting<Boolean> NODE_DATA_SETTING = Setting.boolSetting("node.data", true, false, Setting.Scope.CLUSTER); - public static final Setting<Boolean> NODE_MASTER_SETTING = Setting.boolSetting("node.master", true, false, Setting.Scope.CLUSTER); - public static final Setting<Boolean> NODE_LOCAL_SETTING = Setting.boolSetting("node.local", false, false, Setting.Scope.CLUSTER); - public static final Setting<String> NODE_MODE_SETTING = new Setting<>("node.mode", "network", Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting<Boolean> NODE_INGEST_SETTING = Setting.boolSetting("node.ingest", true, false, Setting.Scope.CLUSTER); - public static final Setting<String> NODE_NAME_SETTING = Setting.simpleString("node.name", false, Setting.Scope.CLUSTER); + public static final Setting<Boolean> WRITE_PORTS_FIELD_SETTING = + Setting.boolSetting("node.portsfile", false, Property.NodeScope); + public static final Setting<Boolean> NODE_CLIENT_SETTING = + Setting.boolSetting("node.client", false, Property.NodeScope); + public static final Setting<Boolean> NODE_DATA_SETTING = Setting.boolSetting("node.data", true, Property.NodeScope); + public static final Setting<Boolean> NODE_MASTER_SETTING = + Setting.boolSetting("node.master", true, Property.NodeScope); + public static final Setting<Boolean> NODE_LOCAL_SETTING = + Setting.boolSetting("node.local", false, Property.NodeScope); + public static final Setting<String> NODE_MODE_SETTING = + new Setting<>("node.mode", "network", Function.identity(), Property.NodeScope); + public static final Setting<Boolean> NODE_INGEST_SETTING = + Setting.boolSetting("node.ingest", true, Property.NodeScope); + public static final Setting<String> NODE_NAME_SETTING = Setting.simpleString("node.name", Property.NodeScope); // this sucks that folks can mistype data, master or ingest and get away with it. // TODO: we should move this to node.attribute.${name} = ${value} instead. - public static final Setting<Settings> NODE_ATTRIBUTES = Setting.groupSetting("node.", false, Setting.Scope.CLUSTER); + public static final Setting<Settings> NODE_ATTRIBUTES = Setting.groupSetting("node.", Property.NodeScope); private static final String CLIENT_TYPE = "node";
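With the constants above, node roles remain plain boolean settings (`node.master`, `node.data`, `node.ingest`, all defaulting to `true`). A small sketch of resolving them from a `Settings` instance, reusing the constants this hunk introduces; the sample values are illustrative:

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;

// Illustrative only: reads the node roles via the Setting constants above,
// relying on their declared defaults when a key is absent.
public class NodeRoles {
    public static void main(String[] args) {
        Settings settings = Settings.settingsBuilder()
            .put("node.master", false)   // overrides the default (true)
            .put("node.data", true)
            .build();
        boolean master = Node.NODE_MASTER_SETTING.get(settings);
        boolean data = Node.NODE_DATA_SETTING.get(settings);
        boolean ingest = Node.NODE_INGEST_SETTING.get(settings); // default: true
        System.out.printf("master=%s data=%s ingest=%s%n", master, data, ingest);
    }
}
```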
@@ -216,7 +223,6 @@ public class Node implements Closeable { modules.add(new ActionModule(DiscoveryNode.ingestNode(settings), false)); modules.add(new GatewayModule(settings)); modules.add(new NodeClientModule()); - modules.add(new PercolatorModule()); modules.add(new ResourceWatcherModule()); modules.add(new RepositoriesModule()); modules.add(new TribeModule()); @@ -289,9 +295,11 @@ public class Node implements Closeable { injector.getInstance(MonitorService.class).start(); injector.getInstance(RestController.class).start(); - assert injector.getInstance(ClusterService.class) instanceof InternalClusterService : - "node cluster service implementation must inherit from InternalClusterService"; - final InternalClusterService clusterService = (InternalClusterService) injector.getInstance(ClusterService.class); + final ClusterService clusterService = injector.getInstance(ClusterService.class); + + final NodeConnectionsService nodeConnectionsService = injector.getInstance(NodeConnectionsService.class); + nodeConnectionsService.start(); + clusterService.setNodeConnectionsService(nodeConnectionsService); // TODO hack around circular dependencies problems injector.getInstance(GatewayAllocator.class).setReallocation(clusterService, injector.getInstance(RoutingService.class)); @@ -310,24 +318,30 @@ public class Node implements Closeable { // Start the transport service now so the publish address will be added to the local disco node in ClusterService TransportService transportService = injector.getInstance(TransportService.class); transportService.start(); + DiscoveryNode localNode = injector.getInstance(DiscoveryNodeService.class) + .buildLocalNode(transportService.boundAddress().publishAddress()); + + // TODO: need to find a cleaner way to start/construct a service with some initial parameters, + // playing nice with the life cycle interfaces + clusterService.setLocalNode(localNode); + transportService.setLocalNode(localNode); + clusterService.add(transportService.getTaskManager()); + clusterService.start(); // start after cluster service so the local disco is known discovery.start(); transportService.acceptIncomingRequests(); discovery.startInitialJoin(); - // tribe nodes don't have a master so we shouldn't register an observer if (DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings).millis() > 0) { final ThreadPool thread = injector.getInstance(ThreadPool.class); ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, thread.getThreadContext()); - final CountDownLatch latch = new CountDownLatch(1); if (observer.observedState().nodes().masterNodeId() == null) { + final CountDownLatch latch = new CountDownLatch(1); observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override - public void onNewClusterState(ClusterState state) { - latch.countDown(); - } + public void onNewClusterState(ClusterState state) { latch.countDown(); } @Override public void onClusterServiceClose() { @@ -336,16 +350,17 @@ public class Node implements Closeable { @Override public void onTimeout(TimeValue timeout) { - assert false; + logger.warn("timed out while waiting for initial discovery state - timeout: {}", + DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings)); + latch.countDown(); } - // use null timeout as we use timeout on the latchwait - }, MasterNodeChangePredicate.INSTANCE, null); - } + }, MasterNodeChangePredicate.INSTANCE, DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings)); - try { - latch.await(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings).millis(), TimeUnit.MILLISECONDS); - } catch (InterruptedException e) { - throw new ElasticsearchTimeoutException("Interrupted while waiting for initial discovery state"); + try { + latch.await(); + } catch (InterruptedException e) { + throw new ElasticsearchTimeoutException("Interrupted while waiting for initial discovery state"); + } } }
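The start() hunk above stops asserting on a discovery timeout: the observer now owns the deadline, and `onTimeout` logs a warning and releases the latch so startup can proceed. A stand-alone sketch of that latch handshake; `StateListener` is a stand-in for `ClusterStateObserver.Listener`, kept hypothetical so the example compiles on its own:

```java
import java.util.concurrent.CountDownLatch;

// Sketch of the "wait for first master" handshake: the listener counts the
// latch down on success or timeout, so the starting thread can block on the
// latch without carrying its own timeout.
public class InitialStateWait {
    interface StateListener {
        void onNewClusterState();
        void onTimeout();
    }

    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        StateListener listener = new StateListener() {
            @Override public void onNewClusterState() { latch.countDown(); }
            @Override public void onTimeout() {
                // after this change the node logs a warning and proceeds
                // instead of tripping an assertion
                System.err.println("timed out while waiting for initial discovery state");
                latch.countDown();
            }
        };
        // Simulate the observer firing a timeout from another thread.
        new Thread(() -> { sleep(100); listener.onTimeout(); }).start();
        latch.await(); // no latch-side timeout: the observer owns the deadline now
        System.out.println("node startup continues");
    }

    private static void sleep(long millis) {
        try { Thread.sleep(millis); } catch (InterruptedException e) { Thread.currentThread().interrupt(); }
    }
}
```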
@@ -393,6 +408,7 @@ public class Node implements Closeable { injector.getInstance(RoutingService.class).stop(); injector.getInstance(ClusterService.class).stop(); injector.getInstance(Discovery.class).stop(); + injector.getInstance(NodeConnectionsService.class).stop(); injector.getInstance(MonitorService.class).stop(); injector.getInstance(GatewayService.class).stop(); injector.getInstance(SearchService.class).stop(); @@ -450,6 +466,8 @@ public class Node implements Closeable { toClose.add(injector.getInstance(RoutingService.class)); toClose.add(() -> stopWatch.stop().start("cluster")); toClose.add(injector.getInstance(ClusterService.class)); + toClose.add(() -> stopWatch.stop().start("node_connections_service")); + toClose.add(injector.getInstance(NodeConnectionsService.class)); toClose.add(() -> stopWatch.stop().start("discovery")); toClose.add(injector.getInstance(Discovery.class)); toClose.add(() -> stopWatch.stop().start("monitor")); @@ -462,8 +480,6 @@ public class Node implements Closeable { toClose.add(injector.getInstance(RestController.class)); toClose.add(() -> stopWatch.stop().start("transport")); toClose.add(injector.getInstance(TransportService.class)); - toClose.add(() -> stopWatch.stop().start("percolator_service")); - toClose.add(injector.getInstance(PercolatorService.class)); for (Class<? extends LifecycleComponent> plugin : pluginsService.nodeServices()) { toClose.add(() -> stopWatch.stop().start("plugin(" + plugin.getName() + ")"));
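The stop/close hunks above slot `NodeConnectionsService` into the ordered shutdown sequence, where timer transitions and services share one `List<Closeable>`. A compact sketch of that idiom; the stage names are illustrative, and the real code delegates the actual closing (and exception collection) to a helper like Lucene's `IOUtils`:

```java
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Sketch of the close() pattern: a flat list of Closeables where timing
// transitions are themselves Closeables, so one loop both times and closes.
public class OrderedShutdown {
    public static void main(String[] args) throws IOException {
        List<Closeable> toClose = new ArrayList<>();
        toClose.add(() -> System.out.println("timing: cluster"));
        toClose.add(() -> System.out.println("closing cluster service"));
        toClose.add(() -> System.out.println("timing: node_connections_service"));
        toClose.add(() -> System.out.println("closing node connections service"));

        // Close in list order; a robust version would keep going on failure
        // and rethrow the collected exceptions at the end.
        for (Closeable c : toClose) {
            c.close();
        }
    }
}
```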
"es.default."; public static final String SECRET_PROMPT_VALUE = "${prompt.secret}"; public static final String TEXT_PROMPT_VALUE = "${prompt.text}"; - public static final Setting IGNORE_SYSTEM_PROPERTIES_SETTING = Setting.boolSetting("config.ignore_system_properties", false, false, Setting.Scope.CLUSTER); + public static final Setting IGNORE_SYSTEM_PROPERTIES_SETTING = + Setting.boolSetting("config.ignore_system_properties", false, Property.NodeScope); /** * Prepares the settings by gathering all elasticsearch system properties and setting defaults. @@ -124,13 +126,9 @@ public class InternalSettingsPreparer { output.put(input); if (useSystemProperties(input)) { if (loadDefaults) { - for (String prefix : PROPERTY_DEFAULTS_PREFIXES) { - output.putProperties(prefix, BootstrapInfo.getSystemProperties()); - } - } - for (String prefix : PROPERTY_PREFIXES) { - output.putProperties(prefix, BootstrapInfo.getSystemProperties(), PROPERTY_DEFAULTS_PREFIXES); + output.putProperties(PROPERTY_DEFAULTS_PREFIX, BootstrapInfo.getSystemProperties()); } + output.putProperties(PROPERTY_PREFIX, BootstrapInfo.getSystemProperties(), PROPERTY_DEFAULTS_PREFIX); } output.replacePropertyPlaceholders(); } diff --git a/core/src/main/java/org/elasticsearch/node/service/NodeService.java b/core/src/main/java/org/elasticsearch/node/service/NodeService.java index b5b8e8f2cb6..cb11fc02443 100644 --- a/core/src/main/java/org/elasticsearch/node/service/NodeService.java +++ b/core/src/main/java/org/elasticsearch/node/service/NodeService.java @@ -24,7 +24,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; -import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -84,13 +84,13 @@ public class NodeService extends AbstractComponent implements Closeable { this.transportService = transportService; this.indicesService = indicesService; this.discovery = discovery; - discovery.setNodeService(this); this.version = version; this.pluginService = pluginService; this.circuitBreakerService = circuitBreakerService; this.ingestService = new IngestService(settings, threadPool, processorsRegistryBuilder); this.settingsFilter = settingsFilter; clusterService.add(ingestService.getPipelineStore()); + clusterService.add(ingestService.getPipelineExecutionService()); } // can not use constructor injection or there will be a circular dependency @@ -132,12 +132,13 @@ public class NodeService extends AbstractComponent implements Closeable { threadPool.info(), transportService.info(), httpServer == null ? null : httpServer.info(), - pluginService == null ? null : pluginService.info() + pluginService == null ? null : pluginService.info(), + ingestService == null ? null : ingestService.info() ); } public NodeInfo info(boolean settings, boolean os, boolean process, boolean jvm, boolean threadPool, - boolean transport, boolean http, boolean plugin) { + boolean transport, boolean http, boolean plugin, boolean ingest) { return new NodeInfo(version, Build.CURRENT, discovery.localNode(), serviceAttributes, settings ? settingsFilter.filter(this.settings) : null, os ? 
diff --git a/core/src/main/java/org/elasticsearch/node/service/NodeService.java b/core/src/main/java/org/elasticsearch/node/service/NodeService.java index b5b8e8f2cb6..cb11fc02443 100644 --- a/core/src/main/java/org/elasticsearch/node/service/NodeService.java +++ b/core/src/main/java/org/elasticsearch/node/service/NodeService.java @@ -24,7 +24,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; -import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -84,13 +84,13 @@ public class NodeService extends AbstractComponent implements Closeable { this.transportService = transportService; this.indicesService = indicesService; this.discovery = discovery; - discovery.setNodeService(this); this.version = version; this.pluginService = pluginService; this.circuitBreakerService = circuitBreakerService; this.ingestService = new IngestService(settings, threadPool, processorsRegistryBuilder); this.settingsFilter = settingsFilter; clusterService.add(ingestService.getPipelineStore()); + clusterService.add(ingestService.getPipelineExecutionService()); } // can not use constructor injection or there will be a circular dependency @@ -132,12 +132,13 @@ public class NodeService extends AbstractComponent implements Closeable { threadPool.info(), transportService.info(), httpServer == null ? null : httpServer.info(), - pluginService == null ? null : pluginService.info() + pluginService == null ? null : pluginService.info(), + ingestService == null ? null : ingestService.info() ); } public NodeInfo info(boolean settings, boolean os, boolean process, boolean jvm, boolean threadPool, - boolean transport, boolean http, boolean plugin) { + boolean transport, boolean http, boolean plugin, boolean ingest) { return new NodeInfo(version, Build.CURRENT, discovery.localNode(), serviceAttributes, settings ? settingsFilter.filter(this.settings) : null, os ? monitorService.osService().info() : null, @@ -146,7 +147,8 @@ public class NodeService extends AbstractComponent implements Closeable { threadPool ? this.threadPool.info() : null, transport ? transportService.info() : null, http ? (httpServer == null ? null : httpServer.info()) : null, - plugin ? (pluginService == null ? null : pluginService.info()) : null + plugin ? (pluginService == null ? null : pluginService.info()) : null, + ingest ? (ingestService == null ? null : ingestService.info()) : null ); } @@ -164,13 +166,14 @@ public class NodeService extends AbstractComponent implements Closeable { httpServer == null ? null : httpServer.stats(), circuitBreakerService.stats(), scriptService.stats(), - discovery.stats() + discovery.stats(), + ingestService.getPipelineExecutionService().stats() ); } public NodeStats stats(CommonStatsFlags indices, boolean os, boolean process, boolean jvm, boolean threadPool, boolean fs, boolean transport, boolean http, boolean circuitBreaker, - boolean script, boolean discoveryStats) { + boolean script, boolean discoveryStats, boolean ingest) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) return new NodeStats(discovery.localNode(), System.currentTimeMillis(), @@ -184,7 +187,8 @@ public class NodeService extends AbstractComponent implements Closeable { http ? (httpServer == null ? null : httpServer.stats()) : null, circuitBreaker ? circuitBreakerService.stats() : null, script ? scriptService.stats() : null, - discoveryStats ? discovery.stats() : null + discoveryStats ? discovery.stats() : null, + ingest ? ingestService.getPipelineExecutionService().stats() : null ); }
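The NodeService hunks above thread a new `ingest` flag through `info(...)` and `stats(...)`, with each section staying null-safe when its backing service is absent. The same pattern reduced to a self-contained sketch; the `IngestService` interface here is hypothetical:

```java
// Illustrative reduction of the pattern above: each optional section is
// gated by its request flag and guarded against a missing service, so a
// response only carries the parts that were requested and available.
public class OptionalSections {
    interface IngestService { String info(); }

    static String buildInfo(boolean ingestRequested, IngestService ingestService) {
        // same shape as: ingest ? (ingestService == null ? null : ingestService.info()) : null
        return ingestRequested ? (ingestService == null ? null : ingestService.info()) : null;
    }

    public static void main(String[] args) {
        IngestService svc = () -> "pipelines=2";
        System.out.println(buildInfo(true, svc));   // pipelines=2
        System.out.println(buildInfo(true, null));  // null (service unavailable)
        System.out.println(buildInfo(false, svc));  // null (not requested)
    }
}
```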
diff --git a/core/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java b/core/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java deleted file mode 100644 index 9d091a4c0bd..00000000000 --- a/core/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - - package org.elasticsearch.percolator; - -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.MultiReader; -import org.apache.lucene.index.SlowCompositeReaderWrapper; -import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Weight; -import org.apache.lucene.util.CloseableThreadLocal; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; - -import java.io.IOException; -import java.util.List; - - -/** - * Implementation of {@link PercolatorIndex} that can hold multiple Lucene documents by - * opening multiple {@link MemoryIndex} based IndexReaders and wrapping them via a single top level reader. - */ -class MultiDocumentPercolatorIndex implements PercolatorIndex { - - private final CloseableThreadLocal<MemoryIndex> cache; - - MultiDocumentPercolatorIndex(CloseableThreadLocal<MemoryIndex> cache) { - this.cache = cache; - } - - @Override - public void prepare(PercolateContext context, ParsedDocument parsedDocument) { - IndexReader[] memoryIndices = new IndexReader[parsedDocument.docs().size()]; - List<ParseContext.Document> docs = parsedDocument.docs(); - int rootDocIndex = docs.size() - 1; - assert rootDocIndex > 0; - MemoryIndex rootDocMemoryIndex = null; - for (int i = 0; i < docs.size(); i++) { - ParseContext.Document d = docs.get(i); - MemoryIndex memoryIndex; - if (rootDocIndex == i) { - // the last doc is always the rootDoc, since that is usually the biggest document it make sense - // to reuse the MemoryIndex it uses - memoryIndex = rootDocMemoryIndex = cache.get(); - } else { - memoryIndex = new MemoryIndex(true); - } - memoryIndices[i] = indexDoc(d, memoryIndex, context, parsedDocument).createSearcher().getIndexReader(); - } - try { - MultiReader mReader = new MultiReader(memoryIndices, true); - LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader); - final IndexSearcher slowSearcher = new IndexSearcher(slowReader) { - - @Override - public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException { - BooleanQuery.Builder bq = new BooleanQuery.Builder(); - bq.add(query, BooleanClause.Occur.MUST); - bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT); - return super.createNormalizedWeight(bq.build(), needsScores); - } - - }; - slowSearcher.setQueryCache(null); - DocSearcher docSearcher = new DocSearcher(slowSearcher, rootDocMemoryIndex); - context.initialize(docSearcher, parsedDocument); - } catch (IOException e) { - throw new ElasticsearchException("Failed to create index for percolator with nested document ", e); - } - } - - MemoryIndex indexDoc(ParseContext.Document d, MemoryIndex memoryIndex, PercolateContext context, ParsedDocument parsedDocument) { - for (IndexableField field : d.getFields()) { -
Analyzer analyzer = context.analysisService().defaultIndexAnalyzer(); - DocumentMapper documentMapper = context.mapperService().documentMapper(parsedDocument.type()); - if (documentMapper != null && documentMapper.mappers().getMapper(field.name()) != null) { - analyzer = documentMapper.mappers().indexAnalyzer(); - } - if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) { - continue; - } - try { - // TODO: instead of passing null here, we can have a CTL> and pass previous, - // like the indexer does - try (TokenStream tokenStream = field.tokenStream(analyzer, null)) { - if (tokenStream != null) { - memoryIndex.addField(field.name(), tokenStream, field.boost()); - } - } - } catch (IOException e) { - throw new ElasticsearchException("Failed to create token stream", e); - } - } - return memoryIndex; - } - - private class DocSearcher extends Engine.Searcher { - - private final MemoryIndex rootDocMemoryIndex; - - private DocSearcher(IndexSearcher searcher, MemoryIndex rootDocMemoryIndex) { - super("percolate", searcher); - this.rootDocMemoryIndex = rootDocMemoryIndex; - } - - @Override - public void close() { - try { - this.reader().close(); - rootDocMemoryIndex.reset(); - } catch (IOException e) { - throw new ElasticsearchException("failed to close IndexReader in percolator with nested doc", e); - } - } - - } -} diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java deleted file mode 100644 index f73c8f31a07..00000000000 --- a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java +++ /dev/null @@ -1,691 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.percolator; - - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.FieldDoc; -import org.apache.lucene.util.Counter; -import org.elasticsearch.action.percolate.PercolateShardRequest; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.text.Text; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.analysis.AnalysisService; -import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchHitField; -import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.aggregations.SearchContextAggregations; -import org.elasticsearch.search.dfs.DfsSearchResult; -import org.elasticsearch.search.fetch.FetchPhase; -import org.elasticsearch.search.fetch.FetchSearchResult; -import org.elasticsearch.search.fetch.FetchSubPhase; -import org.elasticsearch.search.fetch.FetchSubPhaseContext; -import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; -import org.elasticsearch.search.fetch.script.ScriptFieldsContext; -import org.elasticsearch.search.fetch.source.FetchSourceContext; -import org.elasticsearch.search.highlight.SearchContextHighlight; -import org.elasticsearch.search.internal.ContextIndexSearcher; -import org.elasticsearch.search.internal.InternalSearchHit; -import org.elasticsearch.search.internal.InternalSearchHitField; -import org.elasticsearch.search.internal.ScrollContext; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.ShardSearchRequest; -import org.elasticsearch.search.lookup.LeafSearchLookup; -import org.elasticsearch.search.lookup.SearchLookup; -import org.elasticsearch.search.profile.Profilers; -import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.rescore.RescoreSearchContext; -import org.elasticsearch.search.suggest.SuggestionSearchContext; - -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - */ -public class PercolateContext extends SearchContext { - - private int size = 10; - private boolean trackScores; - - private final SearchShardTarget searchShardTarget; - private final IndexService indexService; - private final IndexFieldDataService fieldDataService; - private final IndexShard indexShard; - private final PageCacheRecycler pageCacheRecycler; - private final 
BigArrays bigArrays; - private final ScriptService scriptService; - private final MapperService mapperService; - private final int numberOfShards; - private final Query aliasFilter; - private final long originNanoTime = System.nanoTime(); - private final long startTime; - private final boolean onlyCount; - private Engine.Searcher docSearcher; - private Engine.Searcher engineSearcher; - private ContextIndexSearcher searcher; - - private SearchContextHighlight highlight; - private ParsedQuery parsedQuery; - private Query query; - private Query percolateQuery; - private FetchSubPhase.HitContext hitContext; - private SearchContextAggregations aggregations; - private QuerySearchResult querySearchResult; - private Sort sort; - private final Map subPhaseContexts = new HashMap<>(); - private final QueryShardContext queryShardContext; - private final Map, Collector> queryCollectors = new HashMap<>(); - private SearchLookup searchLookup; - private final FetchPhase fetchPhase; - - public PercolateContext(PercolateShardRequest request, SearchShardTarget searchShardTarget, IndexShard indexShard, - IndexService indexService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ScriptService scriptService, - Query aliasFilter, ParseFieldMatcher parseFieldMatcher, FetchPhase fetchPhase) { - super(parseFieldMatcher); - this.indexShard = indexShard; - this.indexService = indexService; - this.fetchPhase = fetchPhase; - this.fieldDataService = indexService.fieldData(); - this.mapperService = indexService.mapperService(); - this.searchShardTarget = searchShardTarget; - this.pageCacheRecycler = pageCacheRecycler; - this.bigArrays = bigArrays.withCircuitBreaking(); - this.querySearchResult = new QuerySearchResult(0, searchShardTarget); - this.engineSearcher = indexShard.acquireSearcher("percolate"); - this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy()); - this.scriptService = scriptService; - this.numberOfShards = request.getNumberOfShards(); - this.aliasFilter = aliasFilter; - this.startTime = request.getStartTime(); - this.onlyCount = request.onlyCount(); - queryShardContext = indexService.newQueryShardContext(); - queryShardContext.setTypes(request.documentType()); - } - - // for testing: - PercolateContext(PercolateShardRequest request, SearchShardTarget searchShardTarget, MapperService mapperService, QueryShardContext queryShardContext) { - super(null); - this.searchShardTarget = searchShardTarget; - this.mapperService = mapperService; - this.indexService = null; - this.indexShard = null; - this.fieldDataService = null; - this.pageCacheRecycler = null; - this.bigArrays = null; - this.scriptService = null; - this.aliasFilter = null; - this.startTime = 0; - this.numberOfShards = 0; - this.onlyCount = true; - this.queryShardContext = queryShardContext; - this.fetchPhase = null; - } - - public IndexSearcher docSearcher() { - return docSearcher.searcher(); - } - - public void initialize(Engine.Searcher docSearcher, ParsedDocument parsedDocument) { - this.docSearcher = docSearcher; - IndexReader indexReader = docSearcher.reader(); - LeafReaderContext atomicReaderContext = indexReader.leaves().get(0); - this.searchLookup = new SearchLookup(mapperService(), fieldData(), queryShardContext.getTypes()); - LeafSearchLookup leafLookup = searchLookup.getLeafSearchLookup(atomicReaderContext); - leafLookup.setDocument(0); - leafLookup.source().setSource(parsedDocument.source()); - - Map fields = new HashMap<>(); - for (IndexableField field : 
parsedDocument.rootDoc().getFields()) { - fields.put(field.name(), new InternalSearchHitField(field.name(), Collections.emptyList())); - } - hitContext().reset( - new InternalSearchHit(0, "unknown", new Text(parsedDocument.type()), fields), - atomicReaderContext, 0, docSearcher.searcher() - ); - } - - @Override - public IndexShard indexShard() { - return indexShard; - } - - public IndexService indexService() { - return indexService; - } - - public Query percolateQuery() { - return percolateQuery; - } - - public void percolateQuery(Query percolateQuery) { - this.percolateQuery = percolateQuery; - } - - public FetchSubPhase.HitContext hitContext() { - if (hitContext == null) { - hitContext = new FetchSubPhase.HitContext(); - } - return hitContext; - } - - public boolean isOnlyCount() { - return onlyCount; - } - - public Query percolatorTypeFilter(){ - return indexService().mapperService().documentMapper(PercolatorService.TYPE_NAME).typeFilter(); - } - - @Override - public SearchContextHighlight highlight() { - return highlight; - } - - @Override - public void highlight(SearchContextHighlight highlight) { - if (highlight != null) { - // Enforce highlighting by source, because MemoryIndex doesn't support stored fields. - highlight.globalForceSource(true); - } - this.highlight = highlight; - } - - @Override - public SearchShardTarget shardTarget() { - return searchShardTarget; - } - - @Override - public SearchLookup lookup() { - // we cache this since it's really just a single document lookup - check the init method for details - assert searchLookup != null : "context is not initialized"; - assert Arrays.equals(searchLookup.doc().getTypes(), getQueryShardContext().getTypes()) : "types mismatch - can't return lookup"; - return this.searchLookup; - } - - @Override - protected void doClose() { - Releasables.close(engineSearcher, docSearcher); - } - - @Override - public MapperService mapperService() { - return mapperService; - } - - @Override - public SearchContext parsedQuery(ParsedQuery query) { - this.parsedQuery = query; - this.query = query.query(); - return this; - } - - @Override - public ParsedQuery parsedQuery() { - return parsedQuery; - } - - @Override - public Query query() { - return query; - } - - @Override - public IndexFieldDataService fieldData() { - return fieldDataService; - } - - @Override - public SearchContextAggregations aggregations() { - return aggregations; - } - - @Override - public SearchContext aggregations(SearchContextAggregations aggregations) { - this.aggregations = aggregations; - return this; - } - - @Override - public SubPhaseContext getFetchSubPhaseContext(FetchSubPhase.ContextFactory contextFactory) { - String subPhaseName = contextFactory.getName(); - if (subPhaseContexts.get(subPhaseName) == null) { - subPhaseContexts.put(subPhaseName, contextFactory.newContextInstance()); - } - return (SubPhaseContext) subPhaseContexts.get(subPhaseName); - } - - // Unused: - @Override - public void preProcess() { - throw new UnsupportedOperationException(); - } - - @Override - public Query searchFilter(String[] types) { - return aliasFilter(); - } - - @Override - public long id() { - throw new UnsupportedOperationException(); - } - - @Override - public String source() { - throw new UnsupportedOperationException(); - } - - @Override - public ShardSearchRequest request() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchType searchType() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext searchType(SearchType 
searchType) { - throw new UnsupportedOperationException(); - } - - @Override - public int numberOfShards() { - return numberOfShards; - } - - @Override - public float queryBoost() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext queryBoost(float queryBoost) { - throw new UnsupportedOperationException(); - } - - @Override - public long getOriginNanoTime() { - return originNanoTime; - } - - @Override - protected long nowInMillisImpl() { - return startTime; - } - - @Override - public ScrollContext scrollContext() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext scrollContext(ScrollContext scroll) { - throw new UnsupportedOperationException(); - } - - @Override - public SuggestionSearchContext suggest() { - throw new UnsupportedOperationException(); - } - - @Override - public void suggest(SuggestionSearchContext suggest) { - throw new UnsupportedOperationException(); - } - - @Override - public List rescore() { - throw new UnsupportedOperationException(); - } - - @Override - public void addRescore(RescoreSearchContext rescore) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean hasScriptFields() { - throw new UnsupportedOperationException(); - } - - @Override - public ScriptFieldsContext scriptFields() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean sourceRequested() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean hasFetchSourceContext() { - throw new UnsupportedOperationException(); - } - - @Override - public FetchSourceContext fetchSourceContext() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext) { - throw new UnsupportedOperationException(); - } - - @Override - public ContextIndexSearcher searcher() { - return searcher; - } - - @Override - public AnalysisService analysisService() { - return indexService.analysisService(); - } - - @Override - public SimilarityService similarityService() { - return indexService.similarityService(); - } - - @Override - public ScriptService scriptService() { - return scriptService; - } - - @Override - public PageCacheRecycler pageCacheRecycler() { - return pageCacheRecycler; - } - - @Override - public BigArrays bigArrays() { - return bigArrays; - } - - @Override - public BitsetFilterCache bitsetFilterCache() { - return indexService.cache().bitsetFilterCache(); - } - - @Override - public long timeoutInMillis() { - return -1; - } - - @Override - public void timeoutInMillis(long timeoutInMillis) { - throw new UnsupportedOperationException(); - } - - @Override - public int terminateAfter() { - return DEFAULT_TERMINATE_AFTER; - } - - @Override - public void terminateAfter(int terminateAfter) { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext minimumScore(float minimumScore) { - throw new UnsupportedOperationException(); - } - - @Override - public Float minimumScore() { - return null; - } - - @Override - public SearchContext sort(Sort sort) { - this.sort = sort; - return this; - } - - @Override - public Sort sort() { - return sort; - } - - @Override - public SearchContext trackScores(boolean trackScores) { - this.trackScores = trackScores; - return this; - } - - @Override - public boolean trackScores() { - return trackScores; - } - - @Override - public SearchContext searchAfter(FieldDoc searchAfter) { - throw new UnsupportedOperationException(); - } - - @Override - 
public FieldDoc searchAfter() { - return null; - } - - @Override - public SearchContext parsedPostFilter(ParsedQuery postFilter) { - throw new UnsupportedOperationException(); - } - - @Override - public ParsedQuery parsedPostFilter() { - return null; - } - - @Override - public Query aliasFilter() { - return aliasFilter; - } - - @Override - public int from() { - return 0; - } - - @Override - public SearchContext from(int from) { - throw new UnsupportedOperationException(); - } - - @Override - public int size() { - return size; - } - - @Override - public SearchContext size(int size) { - this.size = size; - return this; - } - - @Override - public boolean hasFieldNames() { - throw new UnsupportedOperationException(); - } - - @Override - public List fieldNames() { - throw new UnsupportedOperationException(); - } - - @Override - public void emptyFieldNames() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean explain() { - throw new UnsupportedOperationException(); - } - - @Override - public void explain(boolean explain) { - throw new UnsupportedOperationException(); - } - - @Override - public List groupStats() { - throw new UnsupportedOperationException(); - } - - @Override - public void groupStats(List groupStats) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean version() { - throw new UnsupportedOperationException(); - } - - @Override - public void version(boolean version) { - throw new UnsupportedOperationException(); - } - - @Override - public int[] docIdsToLoad() { - throw new UnsupportedOperationException(); - } - - @Override - public int docIdsToLoadFrom() { - throw new UnsupportedOperationException(); - } - - @Override - public int docIdsToLoadSize() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int docsIdsToLoadSize) { - throw new UnsupportedOperationException(); - } - - @Override - public void accessed(long accessTime) { - throw new UnsupportedOperationException(); - } - - @Override - public long lastAccessTime() { - throw new UnsupportedOperationException(); - } - - @Override - public long keepAlive() { - throw new UnsupportedOperationException(); - } - - @Override - public void keepAlive(long keepAlive) { - throw new UnsupportedOperationException(); - } - - @Override - public DfsSearchResult dfsResult() { - throw new UnsupportedOperationException(); - } - - @Override - public QuerySearchResult queryResult() { - return querySearchResult; - } - - @Override - public FetchSearchResult fetchResult() { - throw new UnsupportedOperationException(); - } - - @Override - public FetchPhase fetchPhase() { - return fetchPhase; - } - - @Override - public MappedFieldType smartNameFieldType(String name) { - return mapperService().fullName(name); - } - - @Override - public ObjectMapper getObjectMapper(String name) { - throw new UnsupportedOperationException(); - } - - @Override - public Counter timeEstimateCounter() { - throw new UnsupportedOperationException(); - } - - @Override - public InnerHitsContext innerHits() { - throw new UnsupportedOperationException(); - } - - @Override - public Map, Collector> queryCollectors() { - return queryCollectors; - } - - @Override - public QueryShardContext getQueryShardContext() { - return queryShardContext; - } - - @Override - public Profilers getProfilers() { - throw new UnsupportedOperationException(); - } -} diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateDocumentParser.java 
b/core/src/main/java/org/elasticsearch/percolator/PercolateDocumentParser.java deleted file mode 100644 index 50db3cecaa6..00000000000 --- a/core/src/main/java/org/elasticsearch/percolator/PercolateDocumentParser.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.percolator; - -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.Query; -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.percolate.PercolateShardRequest; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.mapper.DocumentMapperForType; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.search.SearchParseElement; -import org.elasticsearch.search.aggregations.AggregationPhase; -import org.elasticsearch.search.highlight.HighlightPhase; -import org.elasticsearch.search.sort.SortParseElement; - -import java.util.Map; - -import static org.elasticsearch.index.mapper.SourceToParse.source; - -public class PercolateDocumentParser { - - private final HighlightPhase highlightPhase; - private final SortParseElement sortParseElement; - private final AggregationPhase aggregationPhase; - - @Inject - public PercolateDocumentParser(HighlightPhase highlightPhase, SortParseElement sortParseElement, - AggregationPhase aggregationPhase) { - this.highlightPhase = highlightPhase; - this.sortParseElement = sortParseElement; - this.aggregationPhase = aggregationPhase; - } - - public ParsedDocument parse(final PercolateShardRequest request, final PercolateContext context, final MapperService mapperService) { - BytesReference source = request.source(); - if (source == null || source.length() == 0) { - if (request.docSource() != null && request.docSource().length() != 0) { - return parseFetchedDoc(context, request.docSource(), mapperService, request.shardId().getIndexName(), request.documentType()); - } else { - return null; - } - } - - // TODO: combine all feature parse elements into one map - Map hlElements = highlightPhase.parseElements(); - Map aggregationElements = aggregationPhase.parseElements(); - final QueryShardContext queryShardContext = context.getQueryShardContext(); - ParsedDocument doc = null; - // Some queries (function_score query when for decay functions) 
rely on a SearchContext being set: - // We switch types because this context needs to be in the context of the percolate queries in the shard and - // not the in memory percolate doc - final String[] previousTypes = queryShardContext.getTypes(); - queryShardContext.setTypes(PercolatorService.TYPE_NAME); - try (XContentParser parser = XContentFactory.xContent(source).createParser(source);) { - String currentFieldName = null; - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - // we need to check the "doc" here, so the next token will be START_OBJECT which is - // the actual document starting - if ("doc".equals(currentFieldName)) { - if (doc != null) { - throw new ElasticsearchParseException("Either specify doc or get, not both"); - } - - DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(request.documentType()); - String index = context.shardTarget().index(); - doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(request.documentType()).id("_id_for_percolate_api")); - if (docMapper.getMapping() != null) { - doc.addDynamicMappingsUpdate(docMapper.getMapping()); - } - // the document parsing exists the "doc" object, so we need to set the new current field. - currentFieldName = parser.currentName(); - } - } else if (token == XContentParser.Token.START_OBJECT) { - SearchParseElement element = hlElements.get(currentFieldName); - if (element == null) { - element = aggregationElements.get(currentFieldName); - } - - if ("query".equals(currentFieldName)) { - if (context.percolateQuery() != null) { - throw new ElasticsearchParseException("Either specify query or filter, not both"); - } - context.percolateQuery(queryShardContext.parse(parser).query()); - } else if ("filter".equals(currentFieldName)) { - if (context.percolateQuery() != null) { - throw new ElasticsearchParseException("Either specify query or filter, not both"); - } - Query filter = queryShardContext.parseInnerFilter(parser).query(); - context.percolateQuery(new ConstantScoreQuery(filter)); - } else if ("sort".equals(currentFieldName)) { - parseSort(parser, context); - } else if (element != null) { - element.parse(parser, context); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if ("sort".equals(currentFieldName)) { - parseSort(parser, context); - } - } else if (token == null) { - break; - } else if (token.isValue()) { - if ("size".equals(currentFieldName)) { - context.size(parser.intValue()); - if (context.size() < 0) { - throw new ElasticsearchParseException("size is set to [{}] and is expected to be higher or equal to 0", context.size()); - } - } else if ("sort".equals(currentFieldName)) { - parseSort(parser, context); - } else if ("track_scores".equals(currentFieldName) || "trackScores".equals(currentFieldName)) { - context.trackScores(parser.booleanValue()); - } - } - } - - // We need to get the actual source from the request body for highlighting, so parse the request body again - // and only get the doc source. 
- if (context.highlight() != null) { - parser.close(); - currentFieldName = null; - try (XContentParser parserForHighlighter = XContentFactory.xContent(source).createParser(source)) { - token = parserForHighlighter.nextToken(); - assert token == XContentParser.Token.START_OBJECT; - while ((token = parserForHighlighter.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parserForHighlighter.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if ("doc".equals(currentFieldName)) { - BytesStreamOutput bStream = new BytesStreamOutput(); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, bStream); - builder.copyCurrentStructure(parserForHighlighter); - builder.close(); - doc.setSource(bStream.bytes()); - break; - } else { - parserForHighlighter.skipChildren(); - } - } else if (token == null) { - break; - } - } - } - } - - } catch (Throwable e) { - throw new ElasticsearchParseException("failed to parse request", e); - } finally { - queryShardContext.setTypes(previousTypes); - } - - if (request.docSource() != null && request.docSource().length() != 0) { - if (doc != null) { - throw new IllegalArgumentException("Can't specify the document to percolate in the source of the request and as document id"); - } - - doc = parseFetchedDoc(context, request.docSource(), mapperService, request.shardId().getIndexName(), request.documentType()); - } - - if (doc == null) { - throw new IllegalArgumentException("Nothing to percolate"); - } - - return doc; - } - - private void parseSort(XContentParser parser, PercolateContext context) throws Exception { - context.trackScores(true); - sortParseElement.parse(parser, context); - // null, means default sorting by relevancy - if (context.sort() != null) { - throw new ElasticsearchParseException("Only _score desc is supported"); - } - } - - private ParsedDocument parseFetchedDoc(PercolateContext context, BytesReference fetchedDoc, MapperService mapperService, String index, String type) { - DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type); - ParsedDocument doc = docMapper.getDocumentMapper().parse(source(fetchedDoc).index(index).type(type).id("_id_for_percolate_api")); - if (doc == null) { - throw new ElasticsearchParseException("No doc to percolate in the request"); - } - if (context.highlight() != null) { - doc.setSource(fetchedDoc); - } - return doc; - } - -} diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateException.java b/core/src/main/java/org/elasticsearch/percolator/PercolateException.java deleted file mode 100644 index 81a708a75ec..00000000000 --- a/core/src/main/java/org/elasticsearch/percolator/PercolateException.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.percolator; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchWrapperException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.shard.ShardId; - -import java.io.IOException; -import java.util.Objects; - -/** - * Exception during percolating document(s) at runtime. - */ -public class PercolateException extends ElasticsearchException implements ElasticsearchWrapperException { - - private final ShardId shardId; - - public PercolateException(ShardId shardId, String msg, Throwable cause) { - super(msg, cause); - Objects.requireNonNull(shardId, "shardId must not be null"); - this.shardId = shardId; - } - - public ShardId getShardId() { - return shardId; - } - - public PercolateException(StreamInput in) throws IOException{ - super(in); - shardId = ShardId.readShardId(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - shardId.writeTo(out); - } -} diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java deleted file mode 100644 index 1160aec969b..00000000000 --- a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.percolator; - - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.ReaderUtil; -import org.apache.lucene.index.memory.ExtendedMemoryIndex; -import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.MultiCollector; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopScoreDocCollector; -import org.apache.lucene.search.TotalHitCountCollector; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.CloseableThreadLocal; -import org.elasticsearch.Version; -import org.elasticsearch.action.percolate.PercolateResponse; -import org.elasticsearch.action.percolate.PercolateShardRequest; -import org.elasticsearch.action.percolate.PercolateShardResponse; -import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.Text; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.index.percolator.PercolatorFieldMapper; -import org.elasticsearch.index.percolator.PercolatorQueriesRegistry; -import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.aggregations.AggregationPhase; -import org.elasticsearch.search.aggregations.Aggregator; -import org.elasticsearch.search.aggregations.BucketCollector; -import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator; -import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; -import org.elasticsearch.search.aggregations.support.AggregationContext; -import org.elasticsearch.search.fetch.FetchPhase; -import org.elasticsearch.search.highlight.HighlightField; -import org.elasticsearch.search.highlight.HighlightPhase; -import org.elasticsearch.search.internal.SearchContext; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; -import java.util.stream.StreamSupport; - -import static org.apache.lucene.search.BooleanClause.Occur.FILTER; -import static org.apache.lucene.search.BooleanClause.Occur.MUST; - -public class 
PercolatorService extends AbstractComponent implements Releasable { - - public final static float NO_SCORE = Float.NEGATIVE_INFINITY; - public final static String TYPE_NAME = ".percolator"; - - private final BigArrays bigArrays; - private final ScriptService scriptService; - private final IndicesService indicesService; - private final ClusterService clusterService; - private final HighlightPhase highlightPhase; - private final AggregationPhase aggregationPhase; - private final PageCacheRecycler pageCacheRecycler; - private final CloseableThreadLocal<MemoryIndex> cache; - private final IndexNameExpressionResolver indexNameExpressionResolver; - private final PercolateDocumentParser percolateDocumentParser; - - private final PercolatorIndex single; - private final PercolatorIndex multi; - private final ParseFieldMatcher parseFieldMatcher; - private final FetchPhase fetchPhase; - - @Inject - public PercolatorService(Settings settings, IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService, - PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, - HighlightPhase highlightPhase, ClusterService clusterService, - AggregationPhase aggregationPhase, ScriptService scriptService, - PercolateDocumentParser percolateDocumentParser, FetchPhase fetchPhase) { - super(settings); - this.indexNameExpressionResolver = indexNameExpressionResolver; - this.percolateDocumentParser = percolateDocumentParser; - this.fetchPhase = fetchPhase; - this.parseFieldMatcher = new ParseFieldMatcher(settings); - this.indicesService = indicesService; - this.pageCacheRecycler = pageCacheRecycler; - this.bigArrays = bigArrays; - this.clusterService = clusterService; - this.scriptService = scriptService; - this.aggregationPhase = aggregationPhase; - this.highlightPhase = highlightPhase; - - final long maxReuseBytes = settings.getAsBytesSize("indices.memory.memory_index.size_per_thread", new ByteSizeValue(1, ByteSizeUnit.MB)).bytes(); - cache = new CloseableThreadLocal<MemoryIndex>() { - @Override - protected MemoryIndex initialValue() { - // TODO: should we expose payloads as an option? should offsets be turned on always? - return new ExtendedMemoryIndex(true, false, maxReuseBytes); - } - }; - single = new SingleDocumentPercolatorIndex(cache); - multi = new MultiDocumentPercolatorIndex(cache); - }
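The deleted constructor above builds a per-thread `MemoryIndex` cache capped by `indices.memory.memory_index.size_per_thread` and shares it between the single- and multi-document index implementations. A sketch of that reuse pattern against the plain Lucene `MemoryIndex` API (`ExtendedMemoryIndex` only adds the byte cap); the field and query values are made up:

```java
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.CloseableThreadLocal;

// One MemoryIndex per thread, reset between documents so its buffers are
// recycled rather than reallocated for every percolated document.
public class MemoryIndexReuse {
    private static final CloseableThreadLocal<MemoryIndex> CACHE =
        new CloseableThreadLocal<MemoryIndex>() {
            @Override
            protected MemoryIndex initialValue() {
                return new MemoryIndex(true); // true stores offsets, as the TODO above debates
            }
        };

    public static void main(String[] args) {
        StandardAnalyzer analyzer = new StandardAnalyzer();
        MemoryIndex index = CACHE.get();
        try {
            index.addField("body", "the quick brown fox", analyzer);
            // a score > 0 means the percolator-style query matches this one document
            float score = index.search(new TermQuery(new Term("body", "fox")));
            System.out.println("match score: " + score);
        } finally {
            index.reset(); // make the per-thread index reusable for the next doc
            CACHE.close();
        }
    }
}
```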
- return new ExtendedMemoryIndex(true, false, maxReuseBytes); - } - }; - single = new SingleDocumentPercolatorIndex(cache); - multi = new MultiDocumentPercolatorIndex(cache); - } - - public ReduceResult reduce(boolean onlyCount, List shardResponses) throws IOException { - if (onlyCount) { - long finalCount = 0; - for (PercolateShardResponse shardResponse : shardResponses) { - finalCount += shardResponse.topDocs().totalHits; - } - - InternalAggregations reducedAggregations = reduceAggregations(shardResponses); - return new PercolatorService.ReduceResult(finalCount, reducedAggregations); - } else { - int requestedSize = shardResponses.get(0).requestedSize(); - TopDocs[] shardResults = new TopDocs[shardResponses.size()]; - long foundMatches = 0; - for (int i = 0; i < shardResults.length; i++) { - TopDocs shardResult = shardResponses.get(i).topDocs(); - foundMatches += shardResult.totalHits; - shardResults[i] = shardResult; - } - TopDocs merged = TopDocs.merge(requestedSize, shardResults); - PercolateResponse.Match[] matches = new PercolateResponse.Match[merged.scoreDocs.length]; - for (int i = 0; i < merged.scoreDocs.length; i++) { - ScoreDoc doc = merged.scoreDocs[i]; - PercolateShardResponse shardResponse = shardResponses.get(doc.shardIndex); - String id = shardResponse.ids().get(doc.doc); - Map hl = shardResponse.hls().get(doc.doc); - matches[i] = new PercolateResponse.Match(new Text(shardResponse.getIndex()), new Text(id), doc.score, hl); - } - InternalAggregations reducedAggregations = reduceAggregations(shardResponses); - return new PercolatorService.ReduceResult(foundMatches, matches, reducedAggregations); - } - } - - public PercolateShardResponse percolate(PercolateShardRequest request) throws IOException { - final IndexService percolateIndexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - final IndexShard indexShard = percolateIndexService.getShard(request.shardId().id()); - indexShard.readAllowed(); // check if we can read the shard... - PercolatorQueriesRegistry percolateQueryRegistry = indexShard.percolateRegistry(); - percolateQueryRegistry.prePercolate(); - long startTime = System.nanoTime(); - - // TODO: The filteringAliases should be looked up at the coordinating node and serialized with all shard request, - // just like is done in other apis. 
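For context on the `reduce` method above: Lucene's `TopDocs.merge` interleaves per-shard hits by score and records which shard each merged hit came from, which is what makes the `shardResponses.get(doc.shardIndex)` lookup work. A minimal, self-contained sketch against the Lucene 5.x API of this era (the shard hits below are invented for illustration):

```java
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;

public class MergeShardHitsSketch {
    public static void main(String[] args) {
        // Two hypothetical per-shard results; doc ids are shard-local.
        TopDocs shard0 = new TopDocs(2, new ScoreDoc[] { new ScoreDoc(0, 2.0f), new ScoreDoc(3, 0.5f) }, 2.0f);
        TopDocs shard1 = new TopDocs(1, new ScoreDoc[] { new ScoreDoc(7, 1.5f) }, 1.5f);

        // merge() keeps the top requestedSize hits, ordered by score across shards.
        TopDocs merged = TopDocs.merge(10, new TopDocs[] { shard0, shard1 });

        for (ScoreDoc doc : merged.scoreDocs) {
            // merge() stamps shardIndex on every hit, which is what lets the
            // reduce above resolve ids and highlights in the owning shard response.
            System.out.println("shard=" + doc.shardIndex + " localDoc=" + doc.doc + " score=" + doc.score);
        }
    }
}
```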
- String[] filteringAliases = indexNameExpressionResolver.filteringAliases( - clusterService.state(), - indexShard.shardId().getIndex().getName(), - request.indices() - ); - Query aliasFilter = percolateIndexService.aliasFilter(percolateIndexService.newQueryShardContext(), filteringAliases); - - SearchShardTarget searchShardTarget = new SearchShardTarget(clusterService.localNode().id(), request.shardId().getIndex(), - request.shardId().id()); - final PercolateContext context = new PercolateContext(request, searchShardTarget, indexShard, percolateIndexService, - pageCacheRecycler, bigArrays, scriptService, aliasFilter, parseFieldMatcher, fetchPhase); - SearchContext.setCurrent(context); - try { - ParsedDocument parsedDocument = percolateDocumentParser.parse(request, context, percolateIndexService.mapperService()); - if (context.searcher().getIndexReader().maxDoc() == 0) { - return new PercolateShardResponse(Lucene.EMPTY_TOP_DOCS, Collections.emptyMap(), Collections.emptyMap(), context); - } - if (context.size() < 0) { - context.size(0); - } - - // parse the source either into one MemoryIndex, if it is a single document or index multiple docs if nested - PercolatorIndex percolatorIndex; - DocumentMapper documentMapper = indexShard.mapperService().documentMapper(request.documentType()); - boolean isNested = documentMapper != null && documentMapper.hasNestedObjects(); - if (parsedDocument.docs().size() > 1) { - assert isNested; - percolatorIndex = multi; - } else { - percolatorIndex = single; - } - percolatorIndex.prepare(context, parsedDocument); - - BucketCollector aggregatorCollector = null; - if (context.aggregations() != null) { - AggregationContext aggregationContext = new AggregationContext(context); - context.aggregations().aggregationContext(aggregationContext); - Aggregator[] aggregators = context.aggregations().factories().createTopLevelAggregators(); - List aggregatorCollectors = new ArrayList<>(aggregators.length); - for (int i = 0; i < aggregators.length; i++) { - if (!(aggregators[i] instanceof GlobalAggregator)) { - Aggregator aggregator = aggregators[i]; - aggregatorCollectors.add(aggregator); - } - } - context.aggregations().aggregators(aggregators); - aggregatorCollector = BucketCollector.wrap(aggregatorCollectors); - aggregatorCollector.preCollection(); - } - PercolatorQueriesRegistry queriesRegistry = indexShard.percolateRegistry(); - return doPercolate(context, queriesRegistry, aggregationPhase, aggregatorCollector, highlightPhase); - } finally { - SearchContext.removeCurrent(); - context.close(); - percolateQueryRegistry.postPercolate(System.nanoTime() - startTime); - } - } - - // moved the core percolation logic to a pck protected method to make testing easier: - static PercolateShardResponse doPercolate(PercolateContext context, PercolatorQueriesRegistry queriesRegistry, AggregationPhase aggregationPhase, @Nullable BucketCollector aggregatorCollector, HighlightPhase highlightPhase) throws IOException { - PercolatorQuery.Builder builder = new PercolatorQuery.Builder(context.docSearcher(), queriesRegistry.getPercolateQueries(), context.percolatorTypeFilter()); - if (queriesRegistry.indexSettings().getSettings().getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null).onOrAfter(Version.V_5_0_0)) { - builder.extractQueryTermsQuery(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME, PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME); - } - if (context.percolateQuery() != null || context.aliasFilter() != null) { - BooleanQuery.Builder bq = new BooleanQuery.Builder(); - 
if (context.percolateQuery() != null) { - bq.add(context.percolateQuery(), MUST); - } - if (context.aliasFilter() != null) { - bq.add(context.aliasFilter(), FILTER); - } - builder.setPercolateQuery(bq.build()); - } - PercolatorQuery percolatorQuery = builder.build(); - - if (context.isOnlyCount() || context.size() == 0) { - TotalHitCountCollector collector = new TotalHitCountCollector(); - context.searcher().search(percolatorQuery, MultiCollector.wrap(collector, aggregatorCollector)); - if (aggregatorCollector != null) { - aggregatorCollector.postCollection(); - aggregationPhase.execute(context); - } - return new PercolateShardResponse(new TopDocs(collector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0f), Collections.emptyMap(), Collections.emptyMap(), context); - } else { - int size = context.size(); - if (size > context.searcher().getIndexReader().maxDoc()) { - // prevent easy OOM if more than the total number of docs that - // exist is requested... - size = context.searcher().getIndexReader().maxDoc(); - } - TopScoreDocCollector collector = TopScoreDocCollector.create(size); - context.searcher().search(percolatorQuery, MultiCollector.wrap(collector, aggregatorCollector)); - if (aggregatorCollector != null) { - aggregatorCollector.postCollection(); - aggregationPhase.execute(context); - } - - TopDocs topDocs = collector.topDocs(); - Map ids = new HashMap<>(topDocs.scoreDocs.length); - Map> hls = new HashMap<>(topDocs.scoreDocs.length); - for (ScoreDoc scoreDoc : topDocs.scoreDocs) { - if (context.trackScores() == false) { - // No sort or tracking scores was provided, so use special - // value to indicate to not show the scores: - scoreDoc.score = NO_SCORE; - } - - int segmentIdx = ReaderUtil.subIndex(scoreDoc.doc, context.searcher().getIndexReader().leaves()); - LeafReaderContext atomicReaderContext = context.searcher().getIndexReader().leaves().get(segmentIdx); - final int segmentDocId = scoreDoc.doc - atomicReaderContext.docBase; - SingleFieldsVisitor fieldsVisitor = new SingleFieldsVisitor(UidFieldMapper.NAME); - atomicReaderContext.reader().document(segmentDocId, fieldsVisitor); - String id = fieldsVisitor.uid().id(); - ids.put(scoreDoc.doc, id); - if (context.highlight() != null) { - Query query = queriesRegistry.getPercolateQueries().get(new BytesRef(id)); - context.parsedQuery(new ParsedQuery(query)); - context.hitContext().cache().clear(); - highlightPhase.hitExecute(context, context.hitContext()); - hls.put(scoreDoc.doc, context.hitContext().hit().getHighlightFields()); - } - } - return new PercolateShardResponse(topDocs, ids, hls, context); - } - } - - @Override - public void close() { - cache.close(); - } - - private InternalAggregations reduceAggregations(List shardResults) { - if (shardResults.get(0).aggregations() == null) { - return null; - } - - List aggregationsList = new ArrayList<>(shardResults.size()); - for (PercolateShardResponse shardResult : shardResults) { - aggregationsList.add(shardResult.aggregations()); - } - InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, new InternalAggregation.ReduceContext(bigArrays, scriptService)); - if (aggregations != null) { - List pipelineAggregators = shardResults.get(0).pipelineAggregators(); - if (pipelineAggregators != null) { - List newAggs = StreamSupport.stream(aggregations.spliterator(), false).map((p) -> { - return (InternalAggregation) p; - }).collect(Collectors.toList()); - for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) { - InternalAggregation newAgg = 
pipelineAggregator.doReduce(new InternalAggregations(newAggs), - new InternalAggregation.ReduceContext(bigArrays, scriptService)); - newAggs.add(newAgg); - } - aggregations = new InternalAggregations(newAggs); - } - } - return aggregations; - } - - public final static class ReduceResult { - - private final long count; - private final PercolateResponse.Match[] matches; - private final InternalAggregations reducedAggregations; - - ReduceResult(long count, PercolateResponse.Match[] matches, InternalAggregations reducedAggregations) { - this.count = count; - this.matches = matches; - this.reducedAggregations = reducedAggregations; - } - - public ReduceResult(long count, InternalAggregations reducedAggregations) { - this.count = count; - this.matches = null; - this.reducedAggregations = reducedAggregations; - } - - public long count() { - return count; - } - - public PercolateResponse.Match[] matches() { - return matches; - } - - public InternalAggregations reducedAggregations() { - return reducedAggregations; - } - } - - -} diff --git a/core/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java b/core/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java deleted file mode 100644 index 1d5268e3794..00000000000 --- a/core/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - - -package org.elasticsearch.percolator; - -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.util.CloseableThreadLocal; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; - -import java.io.IOException; - -/** - * Implementation of {@link PercolatorIndex} that can only hold a single Lucene document - * and is optimized for that - */ -class SingleDocumentPercolatorIndex implements PercolatorIndex { - - private final CloseableThreadLocal cache; - - SingleDocumentPercolatorIndex(CloseableThreadLocal cache) { - this.cache = cache; - } - - @Override - public void prepare(PercolateContext context, ParsedDocument parsedDocument) { - MemoryIndex memoryIndex = cache.get(); - for (IndexableField field : parsedDocument.rootDoc().getFields()) { - Analyzer analyzer = context.analysisService().defaultIndexAnalyzer(); - DocumentMapper documentMapper = context.mapperService().documentMapper(parsedDocument.type()); - if (documentMapper != null && documentMapper.mappers().getMapper(field.name()) != null) { - analyzer = documentMapper.mappers().indexAnalyzer(); - } - if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) { - continue; - } - try { - // TODO: instead of passing null here, we can have a CTL> and pass previous, - // like the indexer does - try (TokenStream tokenStream = field.tokenStream(analyzer, null)) { - if (tokenStream != null) { - memoryIndex.addField(field.name(), tokenStream, field.boost()); - } - } - } catch (Exception e) { - throw new ElasticsearchException("Failed to create token stream for [" + field.name() + "]", e); - } - } - context.initialize(new DocEngineSearcher(memoryIndex), parsedDocument); - } - - private class DocEngineSearcher extends Engine.Searcher { - - private final MemoryIndex memoryIndex; - - public DocEngineSearcher(MemoryIndex memoryIndex) { - super("percolate", memoryIndex.createSearcher()); - this.memoryIndex = memoryIndex; - } - - @Override - public void close() { - try { - this.reader().close(); - memoryIndex.reset(); - } catch (IOException e) { - throw new ElasticsearchException("failed to close percolator in-memory index", e); - } - } - } -} diff --git a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index 767f6d42179..18e996f6f37 100644 --- a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -19,16 +19,19 @@ package org.elasticsearch.plugins; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.bootstrap.JarHell; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.UserError; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import 
org.elasticsearch.cli.UserError; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import java.io.BufferedReader; @@ -44,10 +47,13 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFileAttributes; import java.nio.file.attribute.PosixFilePermission; +import java.nio.file.attribute.PosixFilePermissions; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; import java.util.Set; @@ -55,7 +61,7 @@ import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import static java.util.Collections.unmodifiableSet; -import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE; +import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; import static org.elasticsearch.common.util.set.Sets.newHashSet; /** @@ -63,9 +69,9 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; * * The install command takes a plugin id, which may be any of the following: *
<ul>
- * <li>An official elasticsearch plugin name</li>
- * <li>Maven coordinates to a plugin zip</li>
- * <li>A URL to a plugin zip</li>
+ * <li>An official elasticsearch plugin name</li>
+ * <li>Maven coordinates to a plugin zip</li>
+ * <li>A URL to a plugin zip</li>
 * </ul>
 * Plugins are packaged as zip files. Each packaged plugin must contain a
@@ -74,9 +80,9 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet;
 * The installation process first extracts the plugin files into a temporary
 * directory in order to verify the plugin satisfies the following requirements:
 * <ul>
- * <li>Jar hell does not exist, either between the plugin's own jars, or with elasticsearch</li>
- * <li>The plugin is not a module already provided with elasticsearch</li>
- * <li>If the plugin contains extra security permissions, the policy file is validated</li>
+ * <li>Jar hell does not exist, either between the plugin's own jars, or with elasticsearch</li>
+ * <li>The plugin is not a module already provided with elasticsearch</li>
+ * <li>If the plugin contains extra security permissions, the policy file is validated</li>
 * </ul>
 *
      * A plugin may also contain an optional {@code bin} directory which contains scripts. The @@ -88,48 +94,76 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; * elasticsearch config directory, using the name of the plugin. If any files to be installed * already exist, they will be skipped. */ -class InstallPluginCommand extends CliTool.Command { +class InstallPluginCommand extends Command { private static final String PROPERTY_SUPPORT_STAGING_URLS = "es.plugins.staging"; // TODO: make this a resource file generated by gradle static final Set MODULES = unmodifiableSet(newHashSet( - "lang-expression", - "lang-groovy")); + "ingest-grok", + "lang-expression", + "lang-groovy", + "lang-painless", + "reindex")); // TODO: make this a resource file generated by gradle - static final Set OFFICIAL_PLUGINS = unmodifiableSet(newHashSet( - "analysis-icu", - "analysis-kuromoji", - "analysis-phonetic", - "analysis-smartcn", - "analysis-stempel", - "delete-by-query", - "discovery-azure", - "discovery-ec2", - "discovery-gce", - "lang-javascript", - "lang-painless", - "lang-python", - "mapper-attachments", - "mapper-murmur3", - "mapper-size", - "repository-azure", - "repository-hdfs", - "repository-s3", - "store-smb")); + static final Set OFFICIAL_PLUGINS = unmodifiableSet(new LinkedHashSet<>(Arrays.asList( + "analysis-icu", + "analysis-kuromoji", + "analysis-phonetic", + "analysis-smartcn", + "analysis-stempel", + "delete-by-query", + "discovery-azure", + "discovery-ec2", + "discovery-gce", + "ingest-attachment", + "ingest-geoip", + "lang-javascript", + "lang-python", + "mapper-attachments", + "mapper-murmur3", + "mapper-size", + "repository-azure", + "repository-hdfs", + "repository-s3", + "store-smb"))); - private final String pluginId; - private final boolean batch; + private final Environment env; + private final OptionSpec batchOption; + private final OptionSpec arguments; - InstallPluginCommand(Terminal terminal, String pluginId, boolean batch) { - super(terminal); - this.pluginId = pluginId; - this.batch = batch; + InstallPluginCommand(Environment env) { + super("Install a plugin"); + this.env = env; + this.batchOption = parser.acceptsAll(Arrays.asList("b", "batch"), + "Enable batch mode explicitly, automatic confirmation of security permission"); + this.arguments = parser.nonOptions("plugin id"); } @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { + protected void printAdditionalHelp(Terminal terminal) { + terminal.println("The following official plugins may be installed by name:"); + for (String plugin : OFFICIAL_PLUGINS) { + terminal.println(" " + plugin); + } + terminal.println(""); + } + + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + // TODO: in jopt-simple 5.0 we can enforce a min/max number of positional args + List args = arguments.values(options); + if (args.size() != 1) { + throw new UserError(ExitCodes.USAGE, "Must supply a single plugin id argument"); + } + String pluginId = args.get(0); + boolean isBatch = options.has(batchOption) || System.console() == null; + execute(terminal, pluginId, isBatch); + } + + // pkg private for testing + void execute(Terminal terminal, String pluginId, boolean isBatch) throws Exception { // TODO: remove this leniency!! is it needed anymore? 
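The rewritten command declares its options with jopt-simple: `parser.acceptsAll(...)` above registers the `-b`/`--batch` aliases and `parser.nonOptions(...)` collects positional arguments, which `execute` then validates by hand since, as the TODO notes, jopt-simple 4.x cannot enforce an argument count. A standalone sketch of the same pattern (class and argument names here are invented):

```java
import java.util.Arrays;
import java.util.List;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;

public class BatchFlagSketch {
    public static void main(String[] args) {
        OptionParser parser = new OptionParser();
        // Same shape as the install command: -b/--batch plus one positional argument.
        OptionSpec<Void> batch = parser.acceptsAll(Arrays.asList("b", "batch"), "skip interactive confirmation");
        OptionSpec<String> pluginId = parser.nonOptions("plugin id");

        OptionSet options = parser.parse(args);
        List<String> positional = pluginId.values(options);
        if (positional.size() != 1) {
            // jopt-simple 4.x has no min/max arity for non-options, so check manually.
            throw new IllegalArgumentException("Must supply a single plugin id argument");
        }
        System.out.println("plugin=" + positional.get(0) + " batch=" + options.has(batch));
    }
}
```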
if (Files.exists(env.pluginsFile()) == false) { @@ -137,24 +171,22 @@ class InstallPluginCommand extends CliTool.Command { Files.createDirectory(env.pluginsFile()); } - Path pluginZip = download(pluginId, env.tmpFile()); + Path pluginZip = download(terminal, pluginId, env.tmpFile()); Path extractedZip = unzip(pluginZip, env.pluginsFile()); - install(extractedZip, env); - - return CliTool.ExitStatus.OK; + install(terminal, isBatch, extractedZip); } /** Downloads the plugin and returns the file it was downloaded to. */ - private Path download(String pluginId, Path tmpDir) throws Exception { + private Path download(Terminal terminal, String pluginId, Path tmpDir) throws Exception { if (OFFICIAL_PLUGINS.contains(pluginId)) { final String version = Version.CURRENT.toString(); final String url; if (System.getProperty(PROPERTY_SUPPORT_STAGING_URLS, "false").equals("true")) { url = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/staging/%1$s-%2$s/org/elasticsearch/plugin/%3$s/%1$s/%3$s-%1$s.zip", - version, Build.CURRENT.shortHash(), pluginId); + version, Build.CURRENT.shortHash(), pluginId); } else { url = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%1$s/%2$s/%1$s-%2$s.zip", - pluginId, version); + pluginId, version); } terminal.println("-> Downloading " + pluginId + " from elastic"); return downloadZipAndChecksum(url, tmpDir); @@ -164,7 +196,7 @@ class InstallPluginCommand extends CliTool.Command { String[] coordinates = pluginId.split(":"); if (coordinates.length == 3 && pluginId.contains("/") == false) { String mavenUrl = String.format(Locale.ROOT, "https://repo1.maven.org/maven2/%1$s/%2$s/%3$s/%2$s-%3$s.zip", - coordinates[0].replace(".", "/") /* groupId */, coordinates[1] /* artifactId */, coordinates[2] /* version */); + coordinates[0].replace(".", "/") /* groupId */, coordinates[1] /* artifactId */, coordinates[2] /* version */); terminal.println("-> Downloading " + pluginId + " from maven central"); return downloadZipAndChecksum(mavenUrl, tmpDir); } @@ -195,14 +227,14 @@ class InstallPluginCommand extends CliTool.Command { BufferedReader checksumReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); expectedChecksum = checksumReader.readLine(); if (checksumReader.readLine() != null) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "Invalid checksum file at " + checksumUrl); + throw new UserError(ExitCodes.IO_ERROR, "Invalid checksum file at " + checksumUrl); } } byte[] zipbytes = Files.readAllBytes(zip); String gotChecksum = MessageDigests.toHexString(MessageDigests.sha1().digest(zipbytes)); if (expectedChecksum.equals(gotChecksum) == false) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum); + throw new UserError(ExitCodes.IO_ERROR, "SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum); } return zip; @@ -210,7 +242,20 @@ class InstallPluginCommand extends CliTool.Command { private Path unzip(Path zip, Path pluginsDir) throws IOException, UserError { // unzip plugin to a staging temp dir - Path target = Files.createTempDirectory(pluginsDir, ".installing-"); + final Path target; + if (Constants.WINDOWS) { + target = Files.createTempDirectory(pluginsDir, ".installing-"); + } else { + Set perms = new HashSet<>(); + perms.add(PosixFilePermission.OWNER_EXECUTE); + perms.add(PosixFilePermission.OWNER_READ); + perms.add(PosixFilePermission.OWNER_WRITE); + 
perms.add(PosixFilePermission.GROUP_READ); + perms.add(PosixFilePermission.GROUP_EXECUTE); + perms.add(PosixFilePermission.OTHERS_READ); + perms.add(PosixFilePermission.OTHERS_EXECUTE); + target = Files.createTempDirectory(pluginsDir, ".installing-", PosixFilePermissions.asFileAttribute(perms)); + } Files.createDirectories(target); boolean hasEsDir = false; @@ -225,7 +270,14 @@ class InstallPluginCommand extends CliTool.Command { } hasEsDir = true; Path targetFile = target.resolve(entry.getName().substring("elasticsearch/".length())); - // TODO: handle name being an absolute path + + // Using the entry name as a path can result in an entry outside of the plugin dir, either if the + // name starts with the root of the filesystem, or it is a relative entry like ../whatever. + // This check attempts to identify both cases by first normalizing the path (which removes foo/..) + // and ensuring the normalized entry is still rooted with the target plugin directory. + if (targetFile.normalize().startsWith(target) == false) { + throw new IOException("Zip contains entry name '" + entry.getName() + "' resolving outside of plugin directory"); + } // be on the safe side: do not rely on that directories are always extracted // before their children (although this makes sense, but is it guaranteed?) @@ -233,7 +285,7 @@ class InstallPluginCommand extends CliTool.Command { if (entry.isDirectory() == false) { try (OutputStream out = Files.newOutputStream(targetFile)) { int len; - while((len = zipInput.read(buffer)) >= 0) { + while ((len = zipInput.read(buffer)) >= 0) { out.write(buffer, 0, len); } } @@ -244,13 +296,13 @@ class InstallPluginCommand extends CliTool.Command { Files.delete(zip); if (hasEsDir == false) { IOUtils.rm(target); - throw new UserError(CliTool.ExitStatus.DATA_ERROR, "`elasticsearch` directory is missing in the plugin zip"); + throw new UserError(ExitCodes.DATA_ERROR, "`elasticsearch` directory is missing in the plugin zip"); } return target; } /** Load information about the plugin, and verify it can be installed with no errors. */ - private PluginInfo verify(Path pluginRoot, Environment env) throws Exception { + private PluginInfo verify(Terminal terminal, Path pluginRoot, boolean isBatch) throws Exception { // read and validate the plugin descriptor PluginInfo info = PluginInfo.readFromProperties(pluginRoot); terminal.println(VERBOSE, info.toString()); @@ -258,7 +310,7 @@ class InstallPluginCommand extends CliTool.Command { // don't let luser install plugin as a module... // they might be unavoidably in maven central and are packaged up the same way) if (MODULES.contains(info.getName())) { - throw new UserError(CliTool.ExitStatus.USAGE, "plugin '" + info.getName() + "' cannot be installed like this, it is a system module"); + throw new UserError(ExitCodes.USAGE, "plugin '" + info.getName() + "' cannot be installed like this, it is a system module"); } // check for jar hell before any copying @@ -268,7 +320,7 @@ class InstallPluginCommand extends CliTool.Command { // if it exists, confirm or warn the user Path policy = pluginRoot.resolve(PluginInfo.ES_PLUGIN_POLICY); if (Files.exists(policy)) { - PluginSecurity.readPolicy(policy, terminal, env, batch); + PluginSecurity.readPolicy(policy, terminal, env, isBatch); } return info; @@ -305,16 +357,16 @@ class InstallPluginCommand extends CliTool.Command { * Installs the plugin from {@code tmpRoot} into the plugins dir. * If the plugin has a bin dir and/or a config dir, those are copied. 
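The entry-name check added above closes a classic "zip slip" hole: a crafted entry like `../../something` or an absolute path would otherwise be written outside the staging directory. A small isolated sketch of the same normalize-then-startsWith guard (the directory names are made up):

```java
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;

public class ZipSlipGuardSketch {
    static Path resolveEntry(Path target, String entryName) throws IOException {
        Path targetFile = target.resolve(entryName);
        // normalize() collapses "foo/.." segments; a malicious entry then no
        // longer starts with the extraction root and is rejected.
        if (targetFile.normalize().startsWith(target) == false) {
            throw new IOException("Zip contains entry name '" + entryName + "' resolving outside of plugin directory");
        }
        return targetFile;
    }

    public static void main(String[] args) throws IOException {
        Path target = Paths.get("/tmp/plugins/.installing-123");
        System.out.println(resolveEntry(target, "config/plugin.yml")); // stays inside the root
        try {
            resolveEntry(target, "../../etc/passwd");                  // escapes the root
        } catch (IOException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```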
*/ - private void install(Path tmpRoot, Environment env) throws Exception { + private void install(Terminal terminal, boolean isBatch, Path tmpRoot) throws Exception { List deleteOnFailure = new ArrayList<>(); deleteOnFailure.add(tmpRoot); try { - PluginInfo info = verify(tmpRoot, env); + PluginInfo info = verify(terminal, tmpRoot, isBatch); final Path destination = env.pluginsFile().resolve(info.getName()); if (Files.exists(destination)) { - throw new UserError(CliTool.ExitStatus.USAGE, "plugin directory " + destination.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + info.getName() + "' command"); + throw new UserError(ExitCodes.USAGE, "plugin directory " + destination.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + info.getName() + "' command"); } Path tmpBinDir = tmpRoot.resolve("bin"); @@ -347,25 +399,27 @@ class InstallPluginCommand extends CliTool.Command { /** Copies the files from {@code tmpBinDir} into {@code destBinDir}, along with permissions from dest dirs parent. */ private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws Exception { if (Files.isDirectory(tmpBinDir) == false) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "bin in plugin " + info.getName() + " is not a directory"); + throw new UserError(ExitCodes.IO_ERROR, "bin in plugin " + info.getName() + " is not a directory"); } Files.createDirectory(destBinDir); - // setup file attributes for the installed files to those of the parent dir Set perms = new HashSet<>(); - PosixFileAttributeView binAttrs = Files.getFileAttributeView(destBinDir.getParent(), PosixFileAttributeView.class); - if (binAttrs != null) { - perms = new HashSet<>(binAttrs.readAttributes().permissions()); - // setting execute bits, since this just means "the file is executable", and actual execution requires read - perms.add(PosixFilePermission.OWNER_EXECUTE); - perms.add(PosixFilePermission.GROUP_EXECUTE); - perms.add(PosixFilePermission.OTHERS_EXECUTE); + if (Constants.WINDOWS == false) { + // setup file attributes for the installed files to those of the parent dir + PosixFileAttributeView binAttrs = Files.getFileAttributeView(destBinDir.getParent(), PosixFileAttributeView.class); + if (binAttrs != null) { + perms = new HashSet<>(binAttrs.readAttributes().permissions()); + // setting execute bits, since this just means "the file is executable", and actual execution requires read + perms.add(PosixFilePermission.OWNER_EXECUTE); + perms.add(PosixFilePermission.GROUP_EXECUTE); + perms.add(PosixFilePermission.OTHERS_EXECUTE); + } } - try (DirectoryStream stream = Files.newDirectoryStream(tmpBinDir)) { + try (DirectoryStream stream = Files.newDirectoryStream(tmpBinDir)) { for (Path srcFile : stream) { if (Files.isDirectory(srcFile)) { - throw new UserError(CliTool.ExitStatus.DATA_ERROR, "Directories not allowed in bin dir for plugin " + info.getName() + ", found " + srcFile.getFileName()); + throw new UserError(ExitCodes.DATA_ERROR, "Directories not allowed in bin dir for plugin " + info.getName() + ", found " + srcFile.getFileName()); } Path destFile = destBinDir.resolve(tmpBinDir.relativize(srcFile)); @@ -386,24 +440,44 @@ class InstallPluginCommand extends CliTool.Command { */ private void installConfig(PluginInfo info, Path tmpConfigDir, Path destConfigDir) throws Exception { if (Files.isDirectory(tmpConfigDir) == false) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "config in plugin " + info.getName() + " is not a 
directory"); + throw new UserError(ExitCodes.IO_ERROR, "config in plugin " + info.getName() + " is not a directory"); } // create the plugin's config dir "if necessary" Files.createDirectories(destConfigDir); - try (DirectoryStream stream = Files.newDirectoryStream(tmpConfigDir)) { + final PosixFileAttributes destConfigDirAttributes; + if (Constants.WINDOWS) { + destConfigDirAttributes = null; + } else { + destConfigDirAttributes = + Files.getFileAttributeView(destConfigDir.getParent(), PosixFileAttributeView.class).readAttributes(); + setOwnerGroup(destConfigDir, destConfigDirAttributes); + + } + + try (DirectoryStream stream = Files.newDirectoryStream(tmpConfigDir)) { for (Path srcFile : stream) { if (Files.isDirectory(srcFile)) { - throw new UserError(CliTool.ExitStatus.DATA_ERROR, "Directories not allowed in config dir for plugin " + info.getName()); + throw new UserError(ExitCodes.DATA_ERROR, "Directories not allowed in config dir for plugin " + info.getName()); } Path destFile = destConfigDir.resolve(tmpConfigDir.relativize(srcFile)); if (Files.exists(destFile) == false) { Files.copy(srcFile, destFile); + if (Constants.WINDOWS == false) { + setOwnerGroup(destFile, destConfigDirAttributes); + } } } } IOUtils.rm(tmpConfigDir); // clean up what we just copied } + + private static void setOwnerGroup(Path path, PosixFileAttributes attributes) throws IOException { + PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(path, PosixFileAttributeView.class); + fileAttributeView.setOwner(attributes.owner()); + fileAttributeView.setGroup(attributes.group()); + } + } diff --git a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java index 6abed4e6bc2..953e698a4c2 100644 --- a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java @@ -24,22 +24,25 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.settings.Settings; +import joptsimple.OptionSet; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.env.Environment; /** * A command for the plugin cli to list plugins installed in elasticsearch. 
*/ -class ListPluginsCommand extends CliTool.Command { +class ListPluginsCommand extends Command { - ListPluginsCommand(Terminal terminal) { - super(terminal); + private final Environment env; + + ListPluginsCommand(Environment env) { + super("Lists installed elasticsearch plugins"); + this.env = env; } @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { + protected void execute(Terminal terminal, OptionSet options) throws Exception { if (Files.exists(env.pluginsFile()) == false) { throw new IOException("Plugins directory missing: " + env.pluginsFile()); } @@ -50,7 +53,5 @@ class ListPluginsCommand extends CliTool.Command { terminal.println(plugin.getFileName().toString()); } } - - return CliTool.ExitStatus.OK; } } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginCli.java b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java index df402e6359d..be06ea7db1c 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginCli.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java @@ -19,41 +19,24 @@ package org.elasticsearch.plugins; -import org.apache.commons.cli.CommandLine; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolConfig; -import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.cli.MultiCommand; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.node.internal.InternalSettingsPreparer; -import java.util.Locale; - -import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd; -import static org.elasticsearch.common.cli.CliToolConfig.Builder.option; - /** * A cli tool for adding, removing and listing plugins for elasticsearch. */ -public class PluginCli extends CliTool { +public class PluginCli extends MultiCommand { - // commands - private static final String LIST_CMD_NAME = "list"; - private static final String INSTALL_CMD_NAME = "install"; - private static final String REMOVE_CMD_NAME = "remove"; - - // usage config - private static final CliToolConfig.Cmd LIST_CMD = cmd(LIST_CMD_NAME, ListPluginsCommand.class).build(); - private static final CliToolConfig.Cmd INSTALL_CMD = cmd(INSTALL_CMD_NAME, InstallPluginCommand.class) - .options(option("b", "batch").required(false)) - .build(); - private static final CliToolConfig.Cmd REMOVE_CMD = cmd(REMOVE_CMD_NAME, RemovePluginCommand.class).build(); - - static final CliToolConfig CONFIG = CliToolConfig.config("plugin", PluginCli.class) - .cmds(LIST_CMD, INSTALL_CMD, REMOVE_CMD) - .build(); + public PluginCli(Environment env) { + super("A tool for managing installed elasticsearch plugins"); + subcommands.put("list", new ListPluginsCommand(env)); + subcommands.put("install", new InstallPluginCommand(env)); + subcommands.put("remove", new RemovePluginCommand(env)); + } public static void main(String[] args) throws Exception { // initialize default for es.logger.level because we will not read the logging.yml @@ -64,61 +47,13 @@ public class PluginCli extends CliTool { // executed there is no way of knowing where the logfiles should be placed. For example, if elasticsearch // is run as service then the logs should be at /var/log/elasticsearch but when started from the tar they should be at es.home/logs. // Therefore we print to Terminal. 
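After this change `PluginCli` no longer hand-rolls command parsing: it registers `list`, `install`, and `remove` in a subcommand map and lets the shared `MultiCommand` base class handle dispatch, replacing the `parse(cmdName, cli)` switch removed below. A toy dispatcher in the same spirit (an invented illustration, not the real `org.elasticsearch.cli.MultiCommand`, whose internals are not shown in this diff):

```java
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Consumer;

public class MiniMultiCommand {
    private final Map<String, Consumer<String[]>> subcommands = new LinkedHashMap<>();

    public static void main(String[] args) {
        MiniMultiCommand cli = new MiniMultiCommand();
        cli.subcommands.put("list", rest -> System.out.println("listing plugins"));
        cli.subcommands.put("install", rest -> System.out.println("installing " + Arrays.toString(rest)));
        cli.subcommands.put("remove", rest -> System.out.println("removing " + Arrays.toString(rest)));
        cli.run(args);
    }

    void run(String[] args) {
        // Dispatch on the first argument; everything after it goes to the subcommand.
        if (args.length == 0 || subcommands.containsKey(args[0]) == false) {
            System.err.println("Expected one of " + subcommands.keySet());
            return;
        }
        subcommands.get(args[0]).accept(Arrays.copyOfRange(args, 1, args.length));
    }
}
```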
- Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.builder() + Environment loggingEnvironment = InternalSettingsPreparer.prepareEnvironment(Settings.builder() .put("appender.terminal.type", "terminal") .put("rootLogger", "${es.logger.level}, terminal") .put("es.logger.level", loggerLevel) .build(), Terminal.DEFAULT); - // configure but do not read the logging conf file - LogConfigurator.configure(env.settings(), false); - int status = new PluginCli(Terminal.DEFAULT).execute(args).status(); - exit(status); - } - - @SuppressForbidden(reason = "Allowed to exit explicitly from #main()") - private static void exit(int status) { - System.exit(status); - } - - PluginCli(Terminal terminal) { - super(CONFIG, terminal); - } - - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - switch (cmdName.toLowerCase(Locale.ROOT)) { - case LIST_CMD_NAME: - return new ListPluginsCommand(terminal); - case INSTALL_CMD_NAME: - return parseInstallPluginCommand(cli); - case REMOVE_CMD_NAME: - return parseRemovePluginCommand(cli); - default: - assert false : "can't get here as cmd name is validated before this method is called"; - return exitCmd(ExitStatus.USAGE); - } - } - - private Command parseInstallPluginCommand(CommandLine cli) { - String[] args = cli.getArgs(); - if (args.length != 1) { - return exitCmd(ExitStatus.USAGE, terminal, "Must supply a single plugin id argument"); - } - - boolean batch = System.console() == null; - if (cli.hasOption("b")) { - batch = true; - } - - return new InstallPluginCommand(terminal, args[0], batch); - } - - private Command parseRemovePluginCommand(CommandLine cli) { - String[] args = cli.getArgs(); - if (args.length != 1) { - return exitCmd(ExitStatus.USAGE, terminal, "Must supply a single plugin name argument"); - } - - return new RemovePluginCommand(terminal, args[0]); + LogConfigurator.configure(loggingEnvironment.settings(), false); + Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, Terminal.DEFAULT); + exit(new PluginCli(env).main(args, Terminal.DEFAULT)); } } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java b/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java index b14bcaf2ff3..f9c3d1826c9 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java @@ -20,8 +20,8 @@ package org.elasticsearch.plugins; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.Terminal.Verbosity; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.Terminal.Verbosity; import org.elasticsearch.env.Environment; import java.io.IOException; diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java index 3e36c5d8f09..cf953cd1529 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; @@ -71,7 +72,8 @@ public class PluginsService extends AbstractComponent { 
*/ private final List> plugins; private final PluginsAndModules info; - public static final Setting> MANDATORY_SETTING = Setting.listSetting("plugin.mandatory", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting> MANDATORY_SETTING = + Setting.listSetting("plugin.mandatory", Collections.emptyList(), Function.identity(), Property.NodeScope); private final Map> onModuleReferences; diff --git a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java index 8ce1056bbfd..a3e6c375f83 100644 --- a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java @@ -19,40 +19,55 @@ package org.elasticsearch.plugins; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.UserError; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; - import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.List; -import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserError; +import org.elasticsearch.common.Strings; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.env.Environment; + +import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; /** * A command for the plugin cli to remove a plugin from elasticsearch. */ -class RemovePluginCommand extends CliTool.Command { - private final String pluginName; +class RemovePluginCommand extends Command { - public RemovePluginCommand(Terminal terminal, String pluginName) { - super(terminal); - this.pluginName = pluginName; + private final Environment env; + private final OptionSpec arguments; + + RemovePluginCommand(Environment env) { + super("Removes a plugin from elasticsearch"); + this.env = env; + this.arguments = parser.nonOptions("plugin name"); } @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { + protected void execute(Terminal terminal, OptionSet options) throws Exception { + // TODO: in jopt-simple 5.0 we can enforce a min/max number of positional args + List args = arguments.values(options); + if (args.size() != 1) { + throw new UserError(ExitCodes.USAGE, "Must supply a single plugin id argument"); + } + execute(terminal, args.get(0)); + } + + // pkg private for testing + void execute(Terminal terminal, String pluginName) throws Exception { terminal.println("-> Removing " + Strings.coalesceToEmpty(pluginName) + "..."); Path pluginDir = env.pluginsFile().resolve(pluginName); if (Files.exists(pluginDir) == false) { - throw new UserError(CliTool.ExitStatus.USAGE, "Plugin " + pluginName + " not found. Run 'plugin list' to get list of installed plugins."); + throw new UserError(ExitCodes.USAGE, "Plugin " + pluginName + " not found. 
Run 'plugin list' to get list of installed plugins."); } List pluginPaths = new ArrayList<>(); @@ -60,7 +75,7 @@ class RemovePluginCommand extends CliTool.Command { Path pluginBinDir = env.binFile().resolve(pluginName); if (Files.exists(pluginBinDir)) { if (Files.isDirectory(pluginBinDir) == false) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "Bin dir for " + pluginName + " is not a directory"); + throw new UserError(ExitCodes.IO_ERROR, "Bin dir for " + pluginName + " is not a directory"); } pluginPaths.add(pluginBinDir); terminal.println(VERBOSE, "Removing: " + pluginBinDir); @@ -72,7 +87,5 @@ class RemovePluginCommand extends CliTool.Command { pluginPaths.add(tmpPluginDir); IOUtils.rm(pluginPaths.toArray(new Path[pluginPaths.size()])); - - return CliTool.ExitStatus.OK; } } diff --git a/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 6eb32cfb06f..da2d9688095 100644 --- a/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -22,7 +22,6 @@ package org.elasticsearch.repositories; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoriesMetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Injector; diff --git a/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index 91600488332..48ffbd5c1cb 100644 --- a/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -22,9 +22,9 @@ package org.elasticsearch.repositories; import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 552e6aaf2e4..5d423552a56 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -458,7 +458,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent extends 
BlobStoreForm BytesReference bytes = write(obj); try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) { final String resourceDesc = "ChecksumBlobStoreFormat.writeBlob(blob=\"" + blobName + "\")"; - try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, byteArrayOutputStream, BUFFER_SIZE)) { + try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, blobName, byteArrayOutputStream, BUFFER_SIZE)) { CodecUtil.writeHeader(indexOutput, codec, VERSION); try (OutputStream indexOutputOutputStream = new IndexOutputOutputStream(indexOutput) { @Override diff --git a/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java index 0aa62225479..56e7e08c2c3 100644 --- a/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.fs.FsBlobStore; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.index.snapshots.IndexShardRepository; @@ -51,12 +52,17 @@ public class FsRepository extends BlobStoreRepository { public final static String TYPE = "fs"; - public static final Setting LOCATION_SETTING = new Setting<>("location", "", Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_LOCATION_SETTING = new Setting<>("repositories.fs.location", LOCATION_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", "-1", false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_CHUNK_SIZE_SETTING = Setting.byteSizeSetting("repositories.fs.chunk_size", "-1", false, Setting.Scope.CLUSTER); - public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_COMPRESS_SETTING = Setting.boolSetting("repositories.fs.compress", false, false, Setting.Scope.CLUSTER); + public static final Setting LOCATION_SETTING = + new Setting<>("location", "", Function.identity(), Property.NodeScope); + public static final Setting REPOSITORIES_LOCATION_SETTING = + new Setting<>("repositories.fs.location", LOCATION_SETTING, Function.identity(), Property.NodeScope); + public static final Setting CHUNK_SIZE_SETTING = + Setting.byteSizeSetting("chunk_size", "-1", Property.NodeScope); + public static final Setting REPOSITORIES_CHUNK_SIZE_SETTING = + Setting.byteSizeSetting("repositories.fs.chunk_size", "-1", Property.NodeScope); + public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope); + public static final Setting REPOSITORIES_COMPRESS_SETTING = + Setting.boolSetting("repositories.fs.compress", false, Property.NodeScope); private final FsBlobStore blobStore; diff --git a/core/src/main/java/org/elasticsearch/repositories/uri/URLIndexShardRepository.java b/core/src/main/java/org/elasticsearch/repositories/uri/URLIndexShardRepository.java index ab9ec72463a..616a36d5066 100644 --- a/core/src/main/java/org/elasticsearch/repositories/uri/URLIndexShardRepository.java +++ 
b/core/src/main/java/org/elasticsearch/repositories/uri/URLIndexShardRepository.java @@ -19,7 +19,7 @@ package org.elasticsearch.repositories.uri; -import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository; diff --git a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java index 2d15db245aa..77d4f1cc816 100644 --- a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.url.URLBlobStore; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.URIPattern; import org.elasticsearch.env.Environment; import org.elasticsearch.index.snapshots.IndexShardRepository; @@ -55,19 +56,22 @@ public class URLRepository extends BlobStoreRepository { public final static String TYPE = "url"; - public static final Setting> SUPPORTED_PROTOCOLS_SETTING = Setting.listSetting("repositories.url.supported_protocols", - Arrays.asList("http", "https", "ftp", "file", "jar"), Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting> SUPPORTED_PROTOCOLS_SETTING = + Setting.listSetting("repositories.url.supported_protocols", Arrays.asList("http", "https", "ftp", "file", "jar"), + Function.identity(), Property.NodeScope); - public static final Setting> ALLOWED_URLS_SETTING = Setting.listSetting("repositories.url.allowed_urls", - Collections.emptyList(), URIPattern::new, false, Setting.Scope.CLUSTER); + public static final Setting> ALLOWED_URLS_SETTING = + Setting.listSetting("repositories.url.allowed_urls", Collections.emptyList(), URIPattern::new, Property.NodeScope); - public static final Setting URL_SETTING = new Setting<>("url", "http:", URLRepository::parseURL, false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_URL_SETTING = new Setting<>("repositories.url.url", (s) -> s.get("repositories.uri.url", "http:"), - URLRepository::parseURL, false, Setting.Scope.CLUSTER); + public static final Setting URL_SETTING = new Setting<>("url", "http:", URLRepository::parseURL, Property.NodeScope); + public static final Setting REPOSITORIES_URL_SETTING = + new Setting<>("repositories.url.url", (s) -> s.get("repositories.uri.url", "http:"), URLRepository::parseURL, + Property.NodeScope); - public static final Setting LIST_DIRECTORIES_SETTING = Setting.boolSetting("list_directories", true, false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_LIST_DIRECTORIES_SETTING = Setting.boolSetting("repositories.uri.list_directories", true, - false, Setting.Scope.CLUSTER); + public static final Setting LIST_DIRECTORIES_SETTING = + Setting.boolSetting("list_directories", true, Property.NodeScope); + public static final Setting REPOSITORIES_LIST_DIRECTORIES_SETTING = + Setting.boolSetting("repositories.uri.list_directories", true, Property.NodeScope); private final List supportedProtocols; diff --git a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java 
b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 60b3ccce930..b406dfca545 100644 --- a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -23,6 +23,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -34,7 +35,8 @@ import org.elasticsearch.common.settings.Settings; * {@link org.elasticsearch.rest.RestController#registerRelevantHeaders(String...)} */ public abstract class BaseRestHandler extends AbstractComponent implements RestHandler { - public static final Setting MULTI_ALLOW_EXPLICIT_INDEX = Setting.boolSetting("rest.action.multi.allow_explicit_index", true, false, Setting.Scope.CLUSTER); + public static final Setting MULTI_ALLOW_EXPLICIT_INDEX = + Setting.boolSetting("rest.action.multi.allow_explicit_index", true, Property.NodeScope); private final Client client; protected final ParseFieldMatcher parseFieldMatcher; diff --git a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java index ac8eadade0b..52f624849fc 100644 --- a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java +++ b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java @@ -126,7 +126,11 @@ public class BytesRestResponse extends RestResponse { if (channel.request().paramAsBoolean("error_trace", !ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT)) { params = new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"), channel.request()); } else { - SUPPRESSED_ERROR_LOGGER.info("{} Params: {}", t, channel.request().path(), channel.request().params()); + if (status.getStatus() < 500) { + SUPPRESSED_ERROR_LOGGER.debug("{} Params: {}", t, channel.request().path(), channel.request().params()); + } else { + SUPPRESSED_ERROR_LOGGER.warn("{} Params: {}", t, channel.request().path(), channel.request().params()); + } params = channel.request(); } builder.field("error"); diff --git a/core/src/main/java/org/elasticsearch/rest/RestController.java b/core/src/main/java/org/elasticsearch/rest/RestController.java index 64e21002d8c..0cbfdd0ef1b 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestController.java +++ b/core/src/main/java/org/elasticsearch/rest/RestController.java @@ -176,7 +176,7 @@ public class RestController extends AbstractLifecycleComponent { try { channel.sendResponse(new BytesRestResponse(channel, e)); } catch (Throwable e1) { - logger.error("failed to send failure response for uri [" + request.uri() + "]", e1); + logger.error("failed to send failure response for uri [{}]", e1, request.uri()); } } } else { @@ -275,7 +275,7 @@ public class RestController extends AbstractLifecycleComponent { try { channel.sendResponse(new BytesRestResponse(channel, e)); } catch (IOException e1) { - logger.error("Failed to send failure response for uri [" + request.uri() + "]", e1); + logger.error("Failed to send failure response for uri [{}]", e1, request.uri()); } } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java index 
f11efeca87d..bd6637cb788 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java @@ -48,7 +48,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestNodesInfoAction extends BaseRestHandler { private final SettingsFilter settingsFilter; - private final static Set ALLOWED_METRICS = Sets.newHashSet("http", "jvm", "os", "plugins", "process", "settings", "thread_pool", "transport"); + private final static Set ALLOWED_METRICS = Sets.newHashSet("http", "jvm", "os", "plugins", "process", "settings", "thread_pool", "transport", "ingest"); @Inject public RestNodesInfoAction(Settings settings, RestController controller, Client client, SettingsFilter settingsFilter) { @@ -101,6 +101,7 @@ public class RestNodesInfoAction extends BaseRestHandler { nodesInfoRequest.transport(metrics.contains("transport")); nodesInfoRequest.http(metrics.contains("http")); nodesInfoRequest.plugins(metrics.contains("plugins")); + nodesInfoRequest.ingest(metrics.contains("ingest")); } settingsFilter.addFilterSettingParams(request); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java index fb8e9c63740..1e2aece1646 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java @@ -81,6 +81,7 @@ public class RestNodesStatsAction extends BaseRestHandler { nodesStatsRequest.breaker(metrics.contains("breaker")); nodesStatsRequest.script(metrics.contains("script")); nodesStatsRequest.discovery(metrics.contains("discovery")); + nodesStatsRequest.ingest(metrics.contains("ingest")); // check for index specific metrics if (metrics.contains("indices")) { @@ -113,6 +114,6 @@ public class RestNodesStatsAction extends BaseRestHandler { nodesStatsRequest.indices().includeSegmentFileSizes(true); } - client.admin().cluster().nodesStats(nodesStatsRequest, new RestToXContentListener(channel)); + client.admin().cluster().nodesStats(nodesStatsRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestCancelTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestCancelTasksAction.java index 99cdc16253a..658090bb6db 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestCancelTasksAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestCancelTasksAction.java @@ -52,10 +52,10 @@ public class RestCancelTasksAction extends BaseRestHandler { TaskId parentTaskId = new TaskId(request.param("parent_task_id")); CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); - cancelTasksRequest.taskId(taskId); - cancelTasksRequest.nodesIds(nodesIds); - cancelTasksRequest.actions(actions); - cancelTasksRequest.parentTaskId(parentTaskId); + cancelTasksRequest.setTaskId(taskId); + cancelTasksRequest.setNodesIds(nodesIds); + cancelTasksRequest.setActions(actions); + cancelTasksRequest.setParentTaskId(parentTaskId); client.admin().cluster().cancelTasks(cancelTasksRequest, new RestToXContentListener<>(channel)); } } diff --git 
a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java index 992267fa8a5..9a9d1991298 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java @@ -50,13 +50,15 @@ public class RestListTasksAction extends BaseRestHandler { TaskId taskId = new TaskId(request.param("taskId")); String[] actions = Strings.splitStringByCommaToArray(request.param("actions")); TaskId parentTaskId = new TaskId(request.param("parent_task_id")); + boolean waitForCompletion = request.paramAsBoolean("wait_for_completion", false); ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.taskId(taskId); - listTasksRequest.nodesIds(nodesIds); - listTasksRequest.detailed(detailed); - listTasksRequest.actions(actions); - listTasksRequest.parentTaskId(parentTaskId); + listTasksRequest.setTaskId(taskId); + listTasksRequest.setNodesIds(nodesIds); + listTasksRequest.setDetailed(detailed); + listTasksRequest.setActions(actions); + listTasksRequest.setParentTaskId(parentTaskId); + listTasksRequest.setWaitForCompletion(waitForCompletion); client.admin().cluster().listTasks(listTasksRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java index 4e90a6a3a85..a3d0cc84559 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java @@ -144,8 +144,12 @@ public class RestAnalyzeAction extends BaseRestHandler { charFilters.add(parser.text()); } analyzeRequest.charFilters(charFilters.toArray(new String[charFilters.size()])); - } else if (parseFieldMatcher.match(currentFieldName, Fields.EXPLAIN) && token == XContentParser.Token.VALUE_BOOLEAN) { - analyzeRequest.explain(parser.booleanValue()); + } else if (parseFieldMatcher.match(currentFieldName, Fields.EXPLAIN)) { + if (parser.isBooleanValue()) { + analyzeRequest.explain(parser.booleanValue()); + } else { + throw new IllegalArgumentException(currentFieldName + " must be either 'true' or 'false'"); + } } else if (parseFieldMatcher.match(currentFieldName, Fields.ATTRIBUTES) && token == XContentParser.Token.START_ARRAY){ List attributes = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java index fa4371846f6..92fb21db38c 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java @@ -78,7 +78,7 @@ public class RestIndicesStatsAction extends BaseRestHandler { indicesStatsRequest.flush(metrics.contains("flush")); indicesStatsRequest.warmer(metrics.contains("warmer")); indicesStatsRequest.queryCache(metrics.contains("query_cache")); - indicesStatsRequest.percolate(metrics.contains("percolate")); + 
indicesStatsRequest.percolate(metrics.contains("percolator_cache")); indicesStatsRequest.segments(metrics.contains("segments")); indicesStatsRequest.fieldData(metrics.contains("fielddata")); indicesStatsRequest.completion(metrics.contains("completion")); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java index aec087523b8..77366e1cc81 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java @@ -83,8 +83,8 @@ public class RestIndicesAction extends AbstractCatAction { public void processResponse(final ClusterStateResponse clusterStateResponse) { ClusterState state = clusterStateResponse.getState(); final IndicesOptions concreteIndicesOptions = IndicesOptions.fromOptions(false, true, true, true); - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, concreteIndicesOptions, indices); - final String[] openIndices = indexNameExpressionResolver.concreteIndices(state, IndicesOptions.lenientExpandOpen(), indices); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, concreteIndicesOptions, indices); + final String[] openIndices = indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), indices); ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest(openIndices); clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local())); client.admin().cluster().health(clusterHealthRequest, new RestActionListener(channel) { @@ -222,21 +222,9 @@ public class RestIndicesAction extends AbstractCatAction { table.addCell("merges.total_time", "sibling:pri;alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges"); table.addCell("pri.merges.total_time", "default:false;text-align:right;desc:time spent in merges"); - table.addCell("percolate.current", "sibling:pri;alias:pc,percolateCurrent;default:false;text-align:right;desc:number of current percolations"); - table.addCell("pri.percolate.current", "default:false;text-align:right;desc:number of current percolations"); - - table.addCell("percolate.memory_size", "sibling:pri;alias:pm,percolateMemory;default:false;text-align:right;desc:memory used by percolations"); - table.addCell("pri.percolate.memory_size", "default:false;text-align:right;desc:memory used by percolations"); - table.addCell("percolate.queries", "sibling:pri;alias:pq,percolateQueries;default:false;text-align:right;desc:number of registered percolation queries"); table.addCell("pri.percolate.queries", "default:false;text-align:right;desc:number of registered percolation queries"); - table.addCell("percolate.time", "sibling:pri;alias:pti,percolateTime;default:false;text-align:right;desc:time spent percolating"); - table.addCell("pri.percolate.time", "default:false;text-align:right;desc:time spent percolating"); - - table.addCell("percolate.total", "sibling:pri;alias:pto,percolateTotal;default:false;text-align:right;desc:total percolations"); - table.addCell("pri.percolate.total", "default:false;text-align:right;desc:total percolations"); - table.addCell("refresh.total", "sibling:pri;alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes"); table.addCell("pri.refresh.total", "default:false;text-align:right;desc:total refreshes"); @@ -436,20 +424,8 @@ public class RestIndicesAction extends 
AbstractCatAction { table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getTotalTime()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getTotalTime()); - table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getCurrent()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getCurrent()); - - table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getMemorySize()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getMemorySize()); - - table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getNumQueries()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getNumQueries()); - - table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getTime()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getTime()); - - table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getCount()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getCount()); + table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolatorCache().getNumQueries()); + table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolatorCache().getNumQueries()); table.addCell(indexStats == null ? null : indexStats.getTotal().getRefresh().getTotal()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRefresh().getTotal()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index 81c1a22cb94..673f928e6d9 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -45,7 +45,7 @@ import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.percolator.PercolateStats; +import org.elasticsearch.index.percolator.PercolatorQueryCacheStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.shard.IndexingStats; @@ -186,11 +186,7 @@ public class RestNodesAction extends AbstractCatAction { table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged"); table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges"); - table.addCell("percolate.current", "alias:pc,percolateCurrent;default:false;text-align:right;desc:number of current percolations"); - table.addCell("percolate.memory_size", "alias:pm,percolateMemory;default:false;text-align:right;desc:memory used by percolations"); table.addCell("percolate.queries", "alias:pq,percolateQueries;default:false;text-align:right;desc:number of registered percolation queries"); - table.addCell("percolate.time", "alias:pti,percolateTime;default:false;text-align:right;desc:time spent percolating"); - table.addCell("percolate.total", "alias:pto,percolateTotal;default:false;text-align:right;desc:total percolations"); table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes"); 
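The cat-API hunks above trim the percolate columns down to percolate.queries; each surviving addCell header still encodes its column metadata in a semicolon-separated attribute string (sibling, alias, default, text-align, desc). As a reading aid, here is a minimal, hypothetical parser for that format; CatHeaderAttributes and its method are invented names, and this is not Elasticsearch's Table class:

```java
import java.util.HashMap;
import java.util.Map;

// Illustrative only: parses the "key:value;key:value" attribute strings used by
// the cat-API table headers in the hunks above. Invented helper, not ES code.
public class CatHeaderAttributes {

    public static Map<String, String> parse(String attrs) {
        Map<String, String> result = new HashMap<>();
        for (String pair : attrs.split(";")) {
            int sep = pair.indexOf(':');
            if (sep > 0) {
                result.put(pair.substring(0, sep), pair.substring(sep + 1));
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, String> attrs = parse(
            "sibling:pri;alias:pq,percolateQueries;default:false;text-align:right;"
                + "desc:number of registered percolation queries");
        System.out.println(attrs.get("alias"));   // pq,percolateQueries
        System.out.println(attrs.get("default")); // false
    }
}
```

In the real API, the alias values are the short column names accepted by the ?h= request parameter, and default:false marks columns that stay hidden unless explicitly requested.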
table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes"); @@ -343,12 +339,8 @@ public class RestNodesAction extends AbstractCatAction { table.addCell(mergeStats == null ? null : mergeStats.getTotalSize()); table.addCell(mergeStats == null ? null : mergeStats.getTotalTime()); - PercolateStats percolateStats = indicesStats == null ? null : indicesStats.getPercolate(); - table.addCell(percolateStats == null ? null : percolateStats.getCurrent()); - table.addCell(percolateStats == null ? null : percolateStats.getMemorySize()); - table.addCell(percolateStats == null ? null : percolateStats.getNumQueries()); - table.addCell(percolateStats == null ? null : percolateStats.getTime()); - table.addCell(percolateStats == null ? null : percolateStats.getCount()); + PercolatorQueryCacheStats percolatorQueryCacheStats = indicesStats == null ? null : indicesStats.getPercolate(); + table.addCell(percolatorQueryCacheStats == null ? null : percolatorQueryCacheStats.getNumQueries()); RefreshStats refreshStats = indicesStats == null ? null : indicesStats.getRefresh(); table.addCell(refreshStats == null ? null : refreshStats.getTotal()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java index 759fac2eb19..7c555c9b357 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java @@ -92,14 +92,16 @@ public class RestRecoveryAction extends AbstractCatAction { .addCell("repository", "alias:rep;desc:repository") .addCell("snapshot", "alias:snap;desc:snapshot") .addCell("files", "alias:f;desc:number of files to recover") + .addCell("files_recovered", "alias:fr;desc:files recovered") .addCell("files_percent", "alias:fp;desc:percent of files recovered") - .addCell("bytes", "alias:b;desc:size to recover in bytes") + .addCell("files_total", "alias:tf;desc:total number of files") + .addCell("bytes", "alias:b;desc:number of bytes to recover") + .addCell("bytes_recovered", "alias:br;desc:bytes recovered") .addCell("bytes_percent", "alias:bp;desc:percent of bytes recovered") - .addCell("total_files", "alias:tf;desc:total number of files") - .addCell("total_bytes", "alias:tb;desc:total number of bytes") - .addCell("translog", "alias:tr;desc:translog operations recovered") - .addCell("translog_percent", "alias:trp;desc:percent of translog recovery") - .addCell("total_translog", "alias:trt;desc:current total translog operations") + .addCell("bytes_total", "alias:tb;desc:total number of bytes") + .addCell("translog_ops", "alias:to;desc:number of translog ops to recover") + .addCell("translog_ops_recovered", "alias:tor;desc:translog ops recovered") + .addCell("translog_ops_percent", "alias:top;desc:percent of translog ops recovered") .endHeaders(); return t; } @@ -151,14 +153,16 @@ public class RestRecoveryAction extends AbstractCatAction { t.addCell(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshotId().getRepository()); t.addCell(state.getRestoreSource() == null ? 
"n/a" : state.getRestoreSource().snapshotId().getSnapshot()); t.addCell(state.getIndex().totalRecoverFiles()); + t.addCell(state.getIndex().recoveredFileCount()); t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredFilesPercent())); - t.addCell(state.getIndex().totalRecoverBytes()); - t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredBytesPercent())); t.addCell(state.getIndex().totalFileCount()); + t.addCell(state.getIndex().totalRecoverBytes()); + t.addCell(state.getIndex().recoveredBytes()); + t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredBytesPercent())); t.addCell(state.getIndex().totalBytes()); + t.addCell(state.getTranslog().totalOperations()); t.addCell(state.getTranslog().recoveredOperations()); t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getTranslog().recoveredPercent())); - t.addCell(state.getTranslog().totalOperations()); t.endRow(); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index 94a82e8e773..1b3f239ae5f 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -139,11 +139,7 @@ public class RestShardsAction extends AbstractCatAction { table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged"); table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges"); - table.addCell("percolate.current", "alias:pc,percolateCurrent;default:false;text-align:right;desc:number of current percolations"); - table.addCell("percolate.memory_size", "alias:pm,percolateMemory;default:false;text-align:right;desc:memory used by percolations"); table.addCell("percolate.queries", "alias:pq,percolateQueries;default:false;text-align:right;desc:number of registered percolation queries"); - table.addCell("percolate.time", "alias:pti,percolateTime;default:false;text-align:right;desc:time spent percolating"); - table.addCell("percolate.total", "alias:pto,percolateTotal;default:false;text-align:right;desc:total percolations"); table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes"); table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes"); @@ -191,7 +187,7 @@ public class RestShardsAction extends AbstractCatAction { table.addCell(shard.getIndexName()); table.addCell(shard.id()); - IndexMetaData indexMeta = state.getState().getMetaData().index(shard.index()); + IndexMetaData indexMeta = state.getState().getMetaData().getIndexSafe(shard.index()); boolean usesShadowReplicas = false; if (indexMeta != null) { usesShadowReplicas = IndexMetaData.isIndexUsingShadowReplicas(indexMeta.getSettings()); @@ -282,11 +278,7 @@ public class RestShardsAction extends AbstractCatAction { table.addCell(commonStats == null ? null : commonStats.getMerge().getTotalSize()); table.addCell(commonStats == null ? null : commonStats.getMerge().getTotalTime()); - table.addCell(commonStats == null ? null : commonStats.getPercolate().getCurrent()); - table.addCell(commonStats == null ? null : commonStats.getPercolate().getMemorySize()); - table.addCell(commonStats == null ? null : commonStats.getPercolate().getNumQueries()); - table.addCell(commonStats == null ? 
null : commonStats.getPercolate().getTime()); - table.addCell(commonStats == null ? null : commonStats.getPercolate().getCount()); + table.addCell(commonStats == null ? null : commonStats.getPercolatorCache().getNumQueries()); table.addCell(commonStats == null ? null : commonStats.getRefresh().getTotal()); table.addCell(commonStats == null ? null : commonStats.getRefresh().getTotalTime()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java index fa2e662c738..a0812f3e9a4 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java @@ -63,7 +63,6 @@ public class RestThreadPoolAction extends AbstractCatAction { ThreadPool.Names.INDEX, ThreadPool.Names.MANAGEMENT, ThreadPool.Names.FORCE_MERGE, - ThreadPool.Names.PERCOLATE, ThreadPool.Names.REFRESH, ThreadPool.Names.SEARCH, ThreadPool.Names.SNAPSHOT, @@ -79,7 +78,6 @@ public class RestThreadPoolAction extends AbstractCatAction { "i", "ma", "fm", - "p", "r", "s", "sn", diff --git a/core/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java b/core/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java index bf3f0a3e5df..205bea92f96 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 5d9ac118831..10258aaaee4 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -44,6 +44,7 @@ import org.elasticsearch.rest.action.support.RestToXContentListener; import org.elasticsearch.script.Template; import org.elasticsearch.search.aggregations.AggregatorParsers; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.suggest.Suggesters; import java.util.Map; @@ -60,13 +61,14 @@ public class RestMultiSearchAction extends BaseRestHandler { private final boolean allowExplicitIndex; private final IndicesQueriesRegistry indicesQueriesRegistry; private final AggregatorParsers aggParsers; - + private final Suggesters suggesters; @Inject public RestMultiSearchAction(Settings settings, RestController controller, Client client, IndicesQueriesRegistry indicesQueriesRegistry, - AggregatorParsers aggParsers) { + AggregatorParsers aggParsers, Suggesters suggesters) { super(settings, client); this.aggParsers = aggParsers; + this.suggesters = suggesters; controller.registerHandler(GET, "/_msearch", this); controller.registerHandler(POST, "/_msearch", this); @@ -97,7 +99,7 @@ public class RestMultiSearchAction extends BaseRestHandler { IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, 
multiSearchRequest.indicesOptions()); parseRequest(multiSearchRequest, RestActions.getRestContent(request), isTemplateRequest, indices, types, request.param("search_type"), request.param("routing"), indicesOptions, allowExplicitIndex, indicesQueriesRegistry, - parseFieldMatcher, aggParsers); + parseFieldMatcher, aggParsers, suggesters); client.multiSearch(multiSearchRequest, new RestToXContentListener<>(channel)); } @@ -112,7 +114,8 @@ public class RestMultiSearchAction extends BaseRestHandler { @Nullable String routing, IndicesOptions indicesOptions, boolean allowExplicitIndex, IndicesQueriesRegistry indicesQueriesRegistry, - ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers) throws Exception { + ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers, + Suggesters suggesters) throws Exception { XContent xContent = XContentFactory.xContent(data); int from = 0; int length = data.length(); @@ -193,7 +196,7 @@ public class RestMultiSearchAction extends BaseRestHandler { } else { try (XContentParser requestParser = XContentFactory.xContent(slice).createParser(slice)) { queryParseContext.reset(requestParser); - searchRequest.source(SearchSourceBuilder.parseSearchSource(requestParser, queryParseContext, aggParsers)); + searchRequest.source(SearchSourceBuilder.parseSearchSource(requestParser, queryParseContext, aggParsers, suggesters)); } } // move pointers diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index d1cd09373f7..9d533d15ff2 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -48,6 +48,8 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.search.suggest.term.TermSuggestionBuilder.SuggestMode; +import org.elasticsearch.search.suggest.Suggesters; import java.io.IOException; import java.util.Arrays; @@ -64,13 +66,15 @@ public class RestSearchAction extends BaseRestHandler { private final IndicesQueriesRegistry queryRegistry; private final AggregatorParsers aggParsers; + private final Suggesters suggesters; @Inject public RestSearchAction(Settings settings, RestController controller, Client client, IndicesQueriesRegistry queryRegistry, - AggregatorParsers aggParsers) { + AggregatorParsers aggParsers, Suggesters suggesters) { super(settings, client); this.queryRegistry = queryRegistry; this.aggParsers = aggParsers; + this.suggesters = suggesters; controller.registerHandler(GET, "/_search", this); controller.registerHandler(POST, "/_search", this); controller.registerHandler(GET, "/{index}/_search", this); @@ -88,7 +92,7 @@ public class RestSearchAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws IOException { SearchRequest searchRequest = new SearchRequest(); - RestSearchAction.parseSearchRequest(searchRequest, queryRegistry, request, parseFieldMatcher, aggParsers, null); + parseSearchRequest(searchRequest, queryRegistry, request, parseFieldMatcher, aggParsers, suggesters, null); client.search(searchRequest, new RestStatusToXContentListener<>(channel)); } @@ -101,8 +105,10 @@ public class RestSearchAction extends 
BaseRestHandler { * content is read from the request using * RestAction.hasBodyContent. */ - public static void parseSearchRequest(SearchRequest searchRequest, IndicesQueriesRegistry indicesQueriesRegistry, RestRequest request, - ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers, BytesReference restContent) throws IOException { + public static void parseSearchRequest(SearchRequest searchRequest, IndicesQueriesRegistry indicesQueriesRegistry, RestRequest request, + ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers, Suggesters suggesters, BytesReference restContent) + throws IOException { + if (searchRequest.source() == null) { searchRequest.source(new SearchSourceBuilder()); } @@ -117,16 +123,15 @@ public class RestSearchAction extends BaseRestHandler { } if (restContent != null) { QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); - if (isTemplateRequest) { - try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { - context.reset(parser); - context.parseFieldMatcher(parseFieldMatcher); + try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { + context.reset(parser); + context.parseFieldMatcher(parseFieldMatcher); + if (isTemplateRequest) { Template template = TemplateQueryParser.parse(parser, context.parseFieldMatcher(), "params", "template"); searchRequest.template(template); + } else { + searchRequest.source().parseXContent(parser, context, aggParsers, suggesters); } - } else { - RestActions.parseRestSearchSource(searchRequest.source(), restContent, indicesQueriesRegistry, parseFieldMatcher, - aggParsers); } } @@ -254,8 +259,10 @@ public class RestSearchAction extends BaseRestHandler { String suggestText = request.param("suggest_text", request.param("q")); int suggestSize = request.paramAsInt("suggest_size", 5); String suggestMode = request.param("suggest_mode"); - searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion( - termSuggestion(suggestField).field(suggestField).text(suggestText).size(suggestSize).suggestMode(suggestMode))); + searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion(suggestField, + termSuggestion(suggestField) + .text(suggestText).size(suggestSize) + .suggestMode(SuggestMode.resolve(suggestMode)))); } } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java b/core/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java index 4e6b88b68b8..53d9e668de1 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java @@ -24,9 +24,14 @@ import org.elasticsearch.action.suggest.SuggestResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestChannel; @@ -37,6 +42,10 @@ import 
org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.rest.action.support.RestBuilderListener; import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.search.suggest.Suggesters; + +import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; @@ -47,9 +56,15 @@ import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastSh */ public class RestSuggestAction extends BaseRestHandler { + private final IndicesQueriesRegistry queryRegistry; + private final Suggesters suggesters; + @Inject - public RestSuggestAction(Settings settings, RestController controller, Client client) { + public RestSuggestAction(Settings settings, RestController controller, Client client, + IndicesQueriesRegistry queryRegistry, Suggesters suggesters) { super(settings, client); + this.queryRegistry = queryRegistry; + this.suggesters = suggesters; controller.registerHandler(POST, "/_suggest", this); controller.registerHandler(GET, "/_suggest", this); controller.registerHandler(POST, "/{index}/_suggest", this); @@ -57,11 +72,17 @@ public class RestSuggestAction extends BaseRestHandler { } @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws IOException { SuggestRequest suggestRequest = new SuggestRequest(Strings.splitStringByCommaToArray(request.param("index"))); suggestRequest.indicesOptions(IndicesOptions.fromRequest(request, suggestRequest.indicesOptions())); if (RestActions.hasBodyContent(request)) { - suggestRequest.suggest(RestActions.getRestContent(request)); + final BytesReference sourceBytes = RestActions.getRestContent(request); + try (XContentParser parser = XContentFactory.xContent(sourceBytes).createParser(sourceBytes)) { + final QueryParseContext context = new QueryParseContext(queryRegistry); + context.reset(parser); + context.parseFieldMatcher(parseFieldMatcher); + suggestRequest.suggest(SuggestBuilder.fromXContent(context, suggesters)); + } } else { throw new IllegalArgumentException("no content or source provided to execute suggestion"); } @@ -76,7 +97,7 @@ public class RestSuggestAction extends BaseRestHandler { buildBroadcastShardsHeader(builder, request, response); Suggest suggest = response.getSuggest(); if (suggest != null) { - suggest.toXContent(builder, request); + suggest.toInnerXContent(builder, request); } builder.endObject(); return new BytesRestResponse(restStatus, builder); diff --git a/core/src/main/java/org/elasticsearch/rest/action/support/RestActions.java b/core/src/main/java/org/elasticsearch/rest/action/support/RestActions.java index 692a9dc3402..55063664343 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/support/RestActions.java +++ b/core/src/main/java/org/elasticsearch/rest/action/support/RestActions.java @@ -42,6 +42,7 @@ import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.search.aggregations.AggregatorParsers; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.suggest.Suggesters; import java.io.IOException; @@ -114,16 +115,6 @@ public class RestActions { return queryBuilder; } - public static void parseRestSearchSource(SearchSourceBuilder 
source, BytesReference sourceBytes, IndicesQueriesRegistry queryRegistry, - ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers) - throws IOException { - XContentParser parser = XContentFactory.xContent(sourceBytes).createParser(sourceBytes); - QueryParseContext queryParseContext = new QueryParseContext(queryRegistry); - queryParseContext.reset(parser); - queryParseContext.parseFieldMatcher(parseFieldMatcher); - source.parseXContent(parser, queryParseContext, aggParsers); - } - /** * Get Rest content from either payload or source parameter * @param request Rest request diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 8e1ac1c8d77..90c617540fc 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -46,6 +46,7 @@ import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -55,7 +56,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.Environment; import org.elasticsearch.index.query.TemplateQueryParser; -import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.watcher.FileChangesListener; import org.elasticsearch.watcher.FileWatcher; @@ -84,10 +84,13 @@ public class ScriptService extends AbstractComponent implements Closeable { static final String DISABLE_DYNAMIC_SCRIPTING_SETTING = "script.disable_dynamic"; - public static final Setting<Integer> SCRIPT_CACHE_SIZE_SETTING = Setting.intSetting("script.cache.max_size", 100, 0, false, Setting.Scope.CLUSTER); - public static final Setting<TimeValue> SCRIPT_CACHE_EXPIRE_SETTING = Setting.positiveTimeSetting("script.cache.expire", TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER); + public static final Setting<Integer> SCRIPT_CACHE_SIZE_SETTING = + Setting.intSetting("script.cache.max_size", 100, 0, Property.NodeScope); + public static final Setting<TimeValue> SCRIPT_CACHE_EXPIRE_SETTING = + Setting.positiveTimeSetting("script.cache.expire", TimeValue.timeValueMillis(0), Property.NodeScope); public static final String SCRIPT_INDEX = ".scripts"; - public static final Setting<Boolean> SCRIPT_AUTO_RELOAD_ENABLED_SETTING = Setting.boolSetting("script.auto_reload_enabled", true, false, Setting.Scope.CLUSTER); + public static final Setting<Boolean> SCRIPT_AUTO_RELOAD_ENABLED_SETTING = + Setting.boolSetting("script.auto_reload_enabled", true, Property.NodeScope); private final String defaultLang; @@ -225,6 +228,8 @@ public class ScriptService extends AbstractComponent implements Closeable { return scriptEngineService; } + + /** * Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script.
*/ @@ -516,46 +521,53 @@ public class ScriptService extends AbstractComponent implements Closeable { private class ScriptChangesListener extends FileChangesListener { - private Tuple<String, String> scriptNameExt(Path file) { + private Tuple<String, String> getScriptNameExt(Path file) { Path scriptPath = scriptsDirectory.relativize(file); int extIndex = scriptPath.toString().lastIndexOf('.'); - if (extIndex != -1) { - String ext = scriptPath.toString().substring(extIndex + 1); - String scriptName = scriptPath.toString().substring(0, extIndex).replace(scriptPath.getFileSystem().getSeparator(), "_"); - return new Tuple<>(scriptName, ext); - } else { + if (extIndex <= 0) { return null; } + + String ext = scriptPath.toString().substring(extIndex + 1); + if (ext.isEmpty()) { + return null; + } + + String scriptName = scriptPath.toString().substring(0, extIndex).replace(scriptPath.getFileSystem().getSeparator(), "_"); + return new Tuple<>(scriptName, ext); } @Override public void onFileInit(Path file) { + Tuple<String, String> scriptNameExt = getScriptNameExt(file); + if (scriptNameExt == null) { + logger.debug("Skipped script with invalid extension : [{}]", file); + return; + } if (logger.isTraceEnabled()) { logger.trace("Loading script file : [{}]", file); } - Tuple<String, String> scriptNameExt = scriptNameExt(file); - if (scriptNameExt != null) { - ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); - if (engineService == null) { - logger.warn("no script engine found for [{}]", scriptNameExt.v2()); - } else { - try { - //we don't know yet what the script will be used for, but if all of the operations for this lang - // with file scripts are disabled, it makes no sense to even compile it and cache it. - if (isAnyScriptContextEnabled(engineService.getTypes().get(0), engineService, ScriptType.FILE)) { - logger.info("compiling script file [{}]", file.toAbsolutePath()); - try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) { - String script = Streams.copyToString(reader); - CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap()); - staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.getTypes().get(0), engineService.compile(script, Collections.emptyMap()))); - scriptMetrics.onCompilation(); - } - } else { - logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath()); + + ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); + if (engineService == null) { + logger.warn("No script engine found for [{}]", scriptNameExt.v2()); + } else { + try { + //we don't know yet what the script will be used for, but if all of the operations for this lang + // with file scripts are disabled, it makes no sense to even compile it and cache it.
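The getScriptNameExt refactoring above replaces a nested if/else with guard clauses and tightens validation. A standalone rendering of the same splitting logic, runnable in isolation (class and method names invented for illustration), makes the behavioral change explicit: where the old code only required a '.' somewhere in the name, extIndex <= 0 now also rejects dotfiles, and empty extensions are skipped:

```java
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;

// Standalone sketch of the refactored name/extension splitting above.
public class ScriptNameExt {

    static String[] nameAndExtension(Path scriptsDirectory, Path file) {
        Path scriptPath = scriptsDirectory.relativize(file);
        String s = scriptPath.toString();
        int extIndex = s.lastIndexOf('.');
        if (extIndex <= 0) {
            return null; // no extension at all, or a dotfile such as ".hidden"
        }
        String ext = s.substring(extIndex + 1);
        if (ext.isEmpty()) {
            return null; // trailing dot, e.g. "script."
        }
        // subdirectories become '_' separators in the script name, as in the diff
        String name = s.substring(0, extIndex).replace(scriptPath.getFileSystem().getSeparator(), "_");
        return new String[] { name, ext };
    }

    public static void main(String[] args) {
        Path dir = Paths.get("scripts");
        System.out.println(Arrays.toString(nameAndExtension(dir, dir.resolve("calc/bonus.groovy")))); // [calc_bonus, groovy]
        System.out.println(nameAndExtension(dir, dir.resolve(".hidden"))); // null
        System.out.println(nameAndExtension(dir, dir.resolve("noext")));   // null
    }
}
```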
+ if (isAnyScriptContextEnabled(engineService.getTypes().get(0), engineService, ScriptType.FILE)) { + logger.info("compiling script file [{}]", file.toAbsolutePath()); + try (InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) { + String script = Streams.copyToString(reader); + CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap()); + staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.getTypes().get(0), engineService.compile(script, Collections.emptyMap()))); + scriptMetrics.onCompilation(); } - } catch (Throwable e) { - logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1()); + } else { + logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath()); } + } catch (Throwable e) { + logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1()); } } } @@ -567,7 +579,7 @@ public class ScriptService extends AbstractComponent implements Closeable { @Override public void onFileDeleted(Path file) { - Tuple scriptNameExt = scriptNameExt(file); + Tuple scriptNameExt = getScriptNameExt(file); if (scriptNameExt != null) { ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); assert engineService != null; diff --git a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java index 8ececfe25bb..1bf7fdfc843 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java @@ -21,6 +21,7 @@ package org.elasticsearch.script; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.ArrayList; @@ -44,8 +45,7 @@ public class ScriptSettings { ScriptModes.sourceKey(scriptType), scriptType.getDefaultScriptMode().getMode(), ScriptMode::parse, - false, - Setting.Scope.CLUSTER)); + Property.NodeScope)); } SCRIPT_TYPE_SETTING_MAP = Collections.unmodifiableMap(scriptTypeSettingMap); } @@ -66,7 +66,7 @@ public class ScriptSettings { throw new IllegalArgumentException("unregistered default language [" + setting + "]"); } return setting; - }, false, Setting.Scope.CLUSTER); + }, Property.NodeScope); } private static Map> contextSettings(ScriptContextRegistry scriptContextRegistry) { @@ -76,8 +76,7 @@ public class ScriptSettings { ScriptModes.operationKey(scriptContext), ScriptMode.OFF.getMode(), ScriptMode::parse, - false, - Setting.Scope.CLUSTER + Property.NodeScope )); } return scriptContextSettingMap; @@ -137,8 +136,7 @@ public class ScriptSettings { ScriptModes.getKey(language, scriptType, scriptContext), defaultSetting, ScriptMode::parse, - false, - Setting.Scope.CLUSTER); + Property.NodeScope); scriptModeSettings.add(setting); } } diff --git a/core/src/main/java/org/elasticsearch/search/SearchException.java b/core/src/main/java/org/elasticsearch/search/SearchException.java index 0d181cc1dce..535f8acd446 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchException.java +++ b/core/src/main/java/org/elasticsearch/search/SearchException.java @@ -45,7 +45,7 @@ public class SearchException extends ElasticsearchException implements Elasticse public SearchException(StreamInput in) throws IOException { super(in); if 
(in.readBoolean()) { - shardTarget = SearchShardTarget.readSearchShardTarget(in); + shardTarget = new SearchShardTarget(in); } else { shardTarget = null; } @@ -54,7 +54,12 @@ public class SearchException extends ElasticsearchException implements Elasticse @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeOptionalStreamable(shardTarget); + if (shardTarget == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + shardTarget.writeTo(out); + } } public SearchShardTarget shard() { diff --git a/core/src/main/java/org/elasticsearch/search/SearchModule.java b/core/src/main/java/org/elasticsearch/search/SearchModule.java index 0d4faca3672..ae6d5aaf4be 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/core/src/main/java/org/elasticsearch/search/SearchModule.java @@ -62,6 +62,7 @@ import org.elasticsearch.index.query.MoreLikeThisQueryParser; import org.elasticsearch.index.query.MultiMatchQueryParser; import org.elasticsearch.index.query.NestedQueryParser; import org.elasticsearch.index.query.ParentIdQueryParser; +import org.elasticsearch.index.query.PercolatorQueryParser; import org.elasticsearch.index.query.PrefixQueryParser; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParser; @@ -215,6 +216,7 @@ import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase; import org.elasticsearch.search.fetch.innerhits.InnerHitsFetchSubPhase; import org.elasticsearch.search.fetch.matchedqueries.MatchedQueriesFetchSubPhase; import org.elasticsearch.search.fetch.parent.ParentFieldSubFetchPhase; +import org.elasticsearch.index.percolator.PercolatorHighlightSubFetchPhase; import org.elasticsearch.search.fetch.script.ScriptFieldsFetchSubPhase; import org.elasticsearch.search.fetch.source.FetchSourceSubPhase; import org.elasticsearch.search.fetch.version.VersionFetchSubPhase; @@ -226,6 +228,14 @@ import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.rescore.RescoreBuilder; import org.elasticsearch.search.suggest.Suggester; import org.elasticsearch.search.suggest.Suggesters; +import org.elasticsearch.search.suggest.SuggestionBuilder; +import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; +import org.elasticsearch.search.suggest.phrase.Laplace; +import org.elasticsearch.search.suggest.phrase.LinearInterpolation; +import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder; +import org.elasticsearch.search.suggest.phrase.SmoothingModel; +import org.elasticsearch.search.suggest.phrase.StupidBackoff; +import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; import java.util.ArrayList; import java.util.HashMap; @@ -277,8 +287,9 @@ public class SearchModule extends AbstractModule { highlighters.registerExtension(key, clazz); } - public void registerSuggester(String key, Class suggester) { - suggesters.registerExtension(key, suggester); + public void registerSuggester(String key, Suggester suggester) { + suggesters.registerExtension(key, suggester.getClass()); + namedWriteableRegistry.registerPrototype(SuggestionBuilder.class, suggester.getBuilderPrototype()); } /** @@ -347,6 +358,7 @@ public class SearchModule extends AbstractModule { fetchSubPhaseMultibinder.addBinding().to(MatchedQueriesFetchSubPhase.class); fetchSubPhaseMultibinder.addBinding().to(HighlightPhase.class); fetchSubPhaseMultibinder.addBinding().to(ParentFieldSubFetchPhase.class); + 
fetchSubPhaseMultibinder.addBinding().to(PercolatorHighlightSubFetchPhase.class); for (Class clazz : fetchSubPhases) { fetchSubPhaseMultibinder.addBinding().to(clazz); } @@ -371,6 +383,12 @@ public class SearchModule extends AbstractModule { protected void configureSuggesters() { suggesters.bind(binder()); + namedWriteableRegistry.registerPrototype(SuggestionBuilder.class, TermSuggestionBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SuggestionBuilder.class, PhraseSuggestionBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SuggestionBuilder.class, CompletionSuggestionBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SmoothingModel.class, Laplace.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SmoothingModel.class, LinearInterpolation.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SmoothingModel.class, StupidBackoff.PROTOTYPE); } protected void configureHighlighters() { @@ -531,6 +549,7 @@ public class SearchModule extends AbstractModule { registerQueryParser(ExistsQueryParser::new); registerQueryParser(MatchNoneQueryParser::new); registerQueryParser(ParentIdQueryParser::new); + registerQueryParser(PercolatorQueryParser::new); if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { registerQueryParser(GeoShapeQueryParser::new); } diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index eb3568296ad..be2e52b5aa3 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -24,8 +24,8 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesReference; @@ -34,6 +34,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -47,6 +48,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.fieldstats.FieldStatsProvider; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.search.stats.ShardSearchStats; @@ -54,7 +56,6 @@ import org.elasticsearch.index.search.stats.StatsGroupsParseElement; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; @@ -91,6 +92,7 @@ import org.elasticsearch.search.query.QuerySearchResultProvider; 
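The SearchModule hunks above change registerSuggester to take a Suggester instance rather than a Class, so that the suggester's getBuilderPrototype() can be registered with the NamedWriteableRegistry at the same time, and they register the term, phrase, and completion builders plus the smoothing models as prototypes. A simplified sketch of that prototype-registry pattern; the interfaces here are stand-ins, not ES's actual NamedWriteable machinery:

```java
import java.util.HashMap;
import java.util.Map;

// Stand-in for ES's NamedWriteable: anything that knows its wire name.
interface NamedWriteable {
    String getWriteableName();
}

// Sketch of a prototype registry: each category class (e.g. SuggestionBuilder,
// SmoothingModel) maps writeable names to a prototype that can later
// materialize concrete instances from a stream.
class NamedWriteableRegistry {

    private final Map<Class<?>, Map<String, NamedWriteable>> registry = new HashMap<>();

    <T extends NamedWriteable> void registerPrototype(Class<T> category, T prototype) {
        Map<String, NamedWriteable> byName = registry.computeIfAbsent(category, k -> new HashMap<>());
        if (byName.putIfAbsent(prototype.getWriteableName(), prototype) != null) {
            throw new IllegalArgumentException(
                "prototype already registered for [" + prototype.getWriteableName() + "]");
        }
    }

    @SuppressWarnings("unchecked")
    <T extends NamedWriteable> T getPrototype(Class<T> category, String name) {
        Map<String, NamedWriteable> byName = registry.get(category);
        if (byName == null || byName.get(name) == null) {
            throw new IllegalArgumentException("unknown named writeable [" + name + "]");
        }
        return (T) byName.get(name);
    }
}
```

Registering the prototype together with the extension keeps wire deserialization in sync with whatever suggesters a plugin contributes.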
import org.elasticsearch.search.query.ScrollQuerySearchResult; import org.elasticsearch.search.rescore.RescoreBuilder; import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.suggest.Suggesters; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -111,11 +113,14 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; public class SearchService extends AbstractLifecycleComponent<SearchService> implements IndexEventListener { // we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes - public static final Setting<TimeValue> DEFAULT_KEEPALIVE_SETTING = Setting.positiveTimeSetting("search.default_keep_alive", timeValueMinutes(5), false, Setting.Scope.CLUSTER); - public static final Setting<TimeValue> KEEPALIVE_INTERVAL_SETTING = Setting.positiveTimeSetting("search.keep_alive_interval", timeValueMinutes(1), false, Setting.Scope.CLUSTER); + public static final Setting<TimeValue> DEFAULT_KEEPALIVE_SETTING = + Setting.positiveTimeSetting("search.default_keep_alive", timeValueMinutes(5), Property.NodeScope); + public static final Setting<TimeValue> KEEPALIVE_INTERVAL_SETTING = + Setting.positiveTimeSetting("search.keep_alive_interval", timeValueMinutes(1), Property.NodeScope); public static final TimeValue NO_TIMEOUT = timeValueMillis(-1); - public static final Setting<TimeValue> DEFAULT_SEARCH_TIMEOUT_SETTING = Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, true, Setting.Scope.CLUSTER); + public static final Setting<TimeValue> DEFAULT_SEARCH_TIMEOUT_SETTING = + Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, Property.Dynamic, Property.NodeScope); private final ThreadPool threadPool; @@ -149,14 +154,16 @@ public class SearchService extends AbstractLifecycleComponent imp private final Map<String, SearchParseElement> elementParsers; private final ParseFieldMatcher parseFieldMatcher; - private AggregatorParsers aggParsers; + private final AggregatorParsers aggParsers; + private final Suggesters suggesters; @Inject public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, - ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, - QueryPhase queryPhase, FetchPhase fetchPhase, AggregatorParsers aggParsers) { + ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, + QueryPhase queryPhase, FetchPhase fetchPhase, AggregatorParsers aggParsers, Suggesters suggesters) { super(settings); this.aggParsers = aggParsers; + this.suggesters = suggesters; this.parseFieldMatcher = new ParseFieldMatcher(settings); this.threadPool = threadPool; this.clusterService = clusterService; @@ -192,7 +199,7 @@ public class SearchService extends AbstractLifecycleComponent imp public void afterIndexClosed(Index index, Settings indexSettings) { // once an index is closed we can just clean up all the pending search context information // to release memory and let references to the filesystem go etc.
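The SearchService hunk above shows the same Setting migration applied throughout this patch: the trailing (boolean dynamic, Setting.Scope.CLUSTER) arguments become varargs Setting.Property flags, with Property.Dynamic replacing the boolean and Property.NodeScope replacing the cluster scope. A minimal sketch of that API shape; only the names mirror the diff, the internals are invented:

```java
import java.util.EnumSet;
import java.util.Set;
import java.util.function.Function;

// Minimal sketch of the refactored Setting API: varargs Property flags
// replace the old (boolean dynamic, Scope scope) pair. Internals invented.
class Setting<T> {

    enum Property { Dynamic, NodeScope, IndexScope }

    private final String key;
    private final String defaultValue;
    private final Function<String, T> parser;
    private final Set<Property> properties;

    Setting(String key, String defaultValue, Function<String, T> parser, Property... properties) {
        this.key = key;
        this.defaultValue = defaultValue;
        this.parser = parser;
        this.properties = properties.length == 0
            ? EnumSet.noneOf(Property.class)
            : EnumSet.of(properties[0], properties);
    }

    boolean isDynamic() { return properties.contains(Property.Dynamic); }

    T getDefault() { return parser.apply(defaultValue); }

    static Setting<Boolean> boolSetting(String key, boolean defaultValue, Property... props) {
        return new Setting<>(key, Boolean.toString(defaultValue), Boolean::parseBoolean, props);
    }

    static Setting<Integer> intSetting(String key, int defaultValue, int minValue, Property... props) {
        return new Setting<>(key, Integer.toString(defaultValue), s -> {
            int value = Integer.parseInt(s);
            if (value < minValue) {
                throw new IllegalArgumentException(key + " must be >= " + minValue);
            }
            return value;
        }, props);
    }
}
```

Under this shape, search.default_search_timeout reads naturally as a dynamic node-level setting (Property.Dynamic, Property.NodeScope), while the two keep-alive settings are static node-level ones.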
- IndexMetaData idxMeta = SearchService.this.clusterService.state().metaData().index(index.getName()); + IndexMetaData idxMeta = SearchService.this.clusterService.state().metaData().index(index); if (idxMeta != null && idxMeta.getState() == IndexMetaData.State.CLOSE) { // we need to check if it's really closed // since sometimes due to a relocation we already closed the shard and that causes the index to be closed @@ -234,7 +241,7 @@ public class SearchService extends AbstractLifecycleComponent imp FutureUtils.cancel(keepAliveReaper); } - public DfsSearchResult executeDfsPhase(ShardSearchRequest request) { + public DfsSearchResult executeDfsPhase(ShardSearchRequest request) throws IOException { final SearchContext context = createAndPutContext(request); try { contextProcessing(context); @@ -263,7 +270,7 @@ public class SearchService extends AbstractLifecycleComponent imp } } - public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request) { + public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request) throws IOException { final SearchContext context = createAndPutContext(request); final ShardSearchStats shardSearchStats = context.indexShard().searchService(); try { @@ -355,7 +362,7 @@ public class SearchService extends AbstractLifecycleComponent imp } } - public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) { + public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) throws IOException { final SearchContext context = createAndPutContext(request); contextProcessing(context); try { @@ -512,7 +519,7 @@ public class SearchService extends AbstractLifecycleComponent imp return context; } - final SearchContext createAndPutContext(ShardSearchRequest request) { + final SearchContext createAndPutContext(ShardSearchRequest request) throws IOException { SearchContext context = createContext(request, null); boolean success = false; try { @@ -530,11 +537,10 @@ public class SearchService extends AbstractLifecycleComponent imp } } - final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) { - IndexService indexService = indicesService.indexServiceSafe(request.index()); - IndexShard indexShard = indexService.getShard(request.shardId()); - - SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), indexShard.shardId().getIndex(), request.shardId()); + final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws IOException { + IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + IndexShard indexShard = indexService.getShard(request.shardId().getId()); + SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), indexShard.shardId()); Engine.Searcher engineSearcher = searcher == null ? 
indexShard.acquireSearcher("search") : searcher; @@ -542,6 +548,8 @@ public class SearchService extends AbstractLifecycleComponent imp indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout, fetchPhase); + context.getQueryShardContext().setFieldStatsProvider(new FieldStatsProvider(engineSearcher, indexService.mapperService())); + request.rewrite(context.getQueryShardContext()); SearchContext.setCurrent(context); try { if (request.scroll() != null) { @@ -555,7 +563,7 @@ public class SearchService extends AbstractLifecycleComponent imp QueryParseContext queryParseContext = new QueryParseContext(indicesService.getIndicesQueryRegistry()); queryParseContext.reset(parser); queryParseContext.parseFieldMatcher(parseFieldMatcher); - parseSource(context, SearchSourceBuilder.parseSearchSource(parser, queryParseContext, aggParsers)); + parseSource(context, SearchSourceBuilder.parseSearchSource(parser, queryParseContext, aggParsers, suggesters)); } } parseSource(context, request.source()); @@ -718,26 +726,16 @@ public class SearchService extends AbstractLifecycleComponent imp } } if (source.suggest() != null) { - XContentParser suggestParser = null; try { - suggestParser = XContentFactory.xContent(source.suggest()).createParser(source.suggest()); - suggestParser.nextToken(); - this.elementParsers.get("suggest").parse(suggestParser, context); - } catch (Exception e) { - String sSource = "_na_"; - try { - sSource = source.toString(); - } catch (Throwable e1) { - // ignore - } - XContentLocation location = suggestParser != null ? suggestParser.getTokenLocation() : null; - throw new SearchParseException(context, "failed to parse suggest source [" + sSource + "]", location, e); + context.suggest(source.suggest().build(queryShardContext)); + } catch (IOException e) { + throw new SearchContextException(context, "failed to create SuggestionSearchContext", e); } } if (source.rescores() != null) { try { for (RescoreBuilder rescore : source.rescores()) { - context.addRescore(rescore.build(context.getQueryShardContext())); + context.addRescore(rescore.build(queryShardContext)); } } catch (IOException e) { throw new SearchContextException(context, "failed to create RescoreSearchContext", e); @@ -762,7 +760,7 @@ public class SearchService extends AbstractLifecycleComponent imp if (source.highlighter() != null) { HighlightBuilder highlightBuilder = source.highlighter(); try { - context.highlight(highlightBuilder.build(context.getQueryShardContext())); + context.highlight(highlightBuilder.build(queryShardContext)); } catch (IOException e) { throw new SearchContextException(context, "failed to create SearchContextHighlighter", e); } @@ -802,6 +800,11 @@ public class SearchService extends AbstractLifecycleComponent imp } else { SearchParseElement parseElement = this.elementParsers.get(currentFieldName); if (parseElement == null) { + if (currentFieldName != null && currentFieldName.equals("suggest")) { + throw new SearchParseException(context, + "suggest is not supported in [ext], please use SearchSourceBuilder#suggest(SuggestBuilder) instead", + extParser.getTokenLocation()); + } throw new SearchParseException(context, "Unknown element [" + currentFieldName + "] in [ext]", extParser.getTokenLocation()); } else { diff --git a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java index d3958505d70..d675a93b691 100644 --- 
a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java +++ b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -23,28 +23,38 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import java.io.IOException; /** * The target that the search request was executed on. */ -public class SearchShardTarget implements Streamable, Comparable { +public class SearchShardTarget implements Writeable, Comparable { private Text nodeId; private Text index; - private int shardId; + private ShardId shardId; - private SearchShardTarget() { + public SearchShardTarget(StreamInput in) throws IOException { + if (in.readBoolean()) { + nodeId = in.readText(); + } + shardId = ShardId.readShardId(in); + index = new Text(shardId.getIndexName()); + } + public SearchShardTarget(String nodeId, ShardId shardId) { + this.nodeId = nodeId == null ? null : new Text(nodeId); + this.index = new Text(shardId.getIndexName()); + this.shardId = shardId; } public SearchShardTarget(String nodeId, Index index, int shardId) { - this.nodeId = nodeId == null ? null : new Text(nodeId); - this.index = new Text(index.getName()); - this.shardId = shardId; + this(nodeId, new ShardId(index, shardId)); } @Nullable @@ -73,36 +83,26 @@ public class SearchShardTarget implements Streamable, Comparable diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java @Override protected void doReadFrom(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) { - this.docCountError = in.readLong(); - } else { - this.docCountError = -1; - } + this.docCountError = in.readLong(); this.order = InternalOrder.Streams.readOrder(in); this.formatter = ValueFormatterStreams.readOptional(in); this.requiredSize = readSize(in); @@ -218,9 +214,7 @@ public class DoubleTerms extends InternalTerms @Override protected void doWriteTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) { - out.writeLong(docCountError); - } + out.writeLong(docCountError); InternalOrder.Streams.writeOrder(order, out); ValueFormatterStreams.writeOptional(formatter, out); writeSize(requiredSize, out); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 91e949e190f..4377b9debbb 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LongBitSet; -import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasables; @@ -136,7 +135,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr protected static void copy(BytesRef from, BytesRef to) { if (to.bytes.length < from.length) { - to.bytes = new
byte[ArrayUtil.oversize(from.length, RamUsageEstimator.NUM_BYTES_BYTE)]; + to.bytes = new byte[ArrayUtil.oversize(from.length, 1)]; } to.offset = 0; to.length = from.length; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index 0b9ebd97cf9..040768f9d3b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -105,7 +105,7 @@ public class StringTerms extends InternalTerms @Override int compareTerm(Terms.Bucket other) { - return BytesRef.getUTF8SortedAsUnicodeComparator().compare(termBytes, ((Bucket) other).termBytes); + return termBytes.compareTo(((Bucket) other).termBytes); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java index eee9d4cbf90..41dd0bb441e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java @@ -518,13 +518,13 @@ public class IncludeExclude implements Writeable, ToXContent { if (includeValues != null) { for (BytesRef val : includeValues) { double dval=Double.parseDouble(val.utf8ToString()); - result.addAccept( NumericUtils.doubleToSortableLong(dval)); + result.addAccept(NumericUtils.doubleToSortableLong(dval)); } } if (excludeValues != null) { for (BytesRef val : excludeValues) { double dval=Double.parseDouble(val.utf8ToString()); - result.addReject( NumericUtils.doubleToSortableLong(dval)); + result.addReject(NumericUtils.doubleToSortableLong(dval)); } } return result; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java index 2e8ce4563ce..568ecdbec59 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.metrics.cardinality; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LongBitSet; -import org.apache.lucene.util.RamUsageEstimator; import org.apache.lucene.util.packed.PackedInts; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -67,7 +66,7 @@ public final class HyperLogLogPlusPlus implements Releasable { */ public static int precisionFromThreshold(long count) { final long hashTableEntries = (long) Math.ceil(count / MAX_LOAD_FACTOR); - int precision = PackedInts.bitsRequired(hashTableEntries * RamUsageEstimator.NUM_BYTES_INT); + int precision = PackedInts.bitsRequired(hashTableEntries * Integer.BYTES); precision = Math.max(precision, MIN_PRECISION); precision = Math.min(precision, MAX_PRECISION); return precision; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregatorBuilder.java index cd43777959e..de66f68103f 100644 --- 
a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregatorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregatorBuilder.java @@ -48,7 +48,7 @@ public class PercentileRanksAggregatorBuilder extends LeafOnly diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java + if (bucket >= counts.size()) { + return buildEmptyAggregation(); } - assert owningBucketOrdinal < counts.size(); - return new InternalExtendedStats(name, counts.get(owningBucketOrdinal), sums.get(owningBucketOrdinal), - mins.get(owningBucketOrdinal), maxes.get(owningBucketOrdinal), sumOfSqrs.get(owningBucketOrdinal), sigma, formatter, + return new InternalExtendedStats(name, counts.get(bucket), sums.get(bucket), + mins.get(bucket), maxes.get(bucket), sumOfSqrs.get(bucket), sigma, formatter, pipelineAggregators(), metaData()); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java index 543c5907070..9fac5809cef 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java @@ -158,19 +158,13 @@ public class InternalExtendedStats extends InternalStats implements ExtendedStat @Override public void readOtherStatsFrom(StreamInput in) throws IOException { sumOfSqrs = in.readDouble(); - if (in.getVersion().onOrAfter(Version.V_1_4_3)) { - sigma = in.readDouble(); - } else { - sigma = 2.0; - } + sigma = in.readDouble(); } @Override protected void writeOtherStatsTo(StreamOutput out) throws IOException { out.writeDouble(sumOfSqrs); - if (out.getVersion().onOrAfter(Version.V_1_4_3)) { - out.writeDouble(sigma); - } + out.writeDouble(sigma); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregator.java index 1289da661c2..cea99cf868e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregator.java @@ -92,7 +92,7 @@ public class DerivativePipelineAggregator extends PipelineAggregator { for (InternalHistogram.Bucket bucket : buckets) { Long thisBucketKey = resolveBucketKeyAsLong(bucket); Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy); - if (lastBucketValue != null) { + if (lastBucketValue != null && thisBucketValue != null) { double gradient = thisBucketValue - lastBucketValue; double xDiff = -1; if (xAxisUnits != null) { diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 0cb83dbd2f9..a20ec535238 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregatorBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -55,6 +56,7 @@ import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.search.suggest.Suggesters; import java.io.IOException; import java.util.ArrayList; @@ -105,9 +107,10 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ return PROTOTYPE.readFrom(in); } - public static SearchSourceBuilder parseSearchSource(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers) + public static SearchSourceBuilder parseSearchSource(XContentParser parser, QueryParseContext context, + AggregatorParsers aggParsers, Suggesters suggesters) throws IOException { - return PROTOTYPE.fromXContent(parser, context, aggParsers); + return PROTOTYPE.fromXContent(parser, context, aggParsers, suggesters); } /** @@ -156,7 +159,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ private HighlightBuilder highlightBuilder; - private BytesReference suggestBuilder; + private SuggestBuilder suggestBuilder; private BytesReference innerHitsBuilder; @@ -475,20 +478,14 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } public SearchSourceBuilder suggest(SuggestBuilder suggestBuilder) { - try { - XContentBuilder builder = XContentFactory.jsonBuilder(); - suggestBuilder.toXContent(builder, EMPTY_PARAMS); - this.suggestBuilder = builder.bytes(); - return this; - } catch (IOException e) { - throw new RuntimeException(e); - } + this.suggestBuilder = suggestBuilder; + return this; } /** - * Gets the bytes representing the suggester builder for this request. + * Gets the suggester builder for this request. */ - public BytesReference suggest() { + public SuggestBuilder suggest() { return suggestBuilder; } @@ -733,22 +730,78 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ return ext; } + /** + * Rewrites this search source builder into its primitive form, e.g. by + * rewriting the QueryBuilder. If the builder did not change, the identity + * reference must be returned, otherwise the builder will be rewritten + * infinitely.
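+ * Callers can then rewrite to a fixed point by looping until the same instance is returned, as {@code ShardSearchLocalRequest#rewrite} does below: {@code while (rewritten != source) { rewritten = source.rewrite(context); source = rewritten; }}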
+ */ + public SearchSourceBuilder rewrite(QueryShardContext context) throws IOException { + assert (this.equals(shallowCopy(queryBuilder, postQueryBuilder))); + QueryBuilder queryBuilder = null; + if (this.queryBuilder != null) { + queryBuilder = this.queryBuilder.rewrite(context); + } + QueryBuilder postQueryBuilder = null; + if (this.postQueryBuilder != null) { + postQueryBuilder = this.postQueryBuilder.rewrite(context); + } + boolean rewritten = queryBuilder != this.queryBuilder || postQueryBuilder != this.postQueryBuilder; + if (rewritten) { + return shallowCopy(queryBuilder, postQueryBuilder); + } + return this; + } + + private SearchSourceBuilder shallowCopy(QueryBuilder queryBuilder, QueryBuilder postQueryBuilder) { + SearchSourceBuilder rewrittenBuilder = new SearchSourceBuilder(); + rewrittenBuilder.aggregations = aggregations; + rewrittenBuilder.explain = explain; + rewrittenBuilder.ext = ext; + rewrittenBuilder.fetchSourceContext = fetchSourceContext; + rewrittenBuilder.fieldDataFields = fieldDataFields; + rewrittenBuilder.fieldNames = fieldNames; + rewrittenBuilder.from = from; + rewrittenBuilder.highlightBuilder = highlightBuilder; + rewrittenBuilder.indexBoost = indexBoost; + rewrittenBuilder.innerHitsBuilder = innerHitsBuilder; + rewrittenBuilder.minScore = minScore; + rewrittenBuilder.postQueryBuilder = postQueryBuilder; + rewrittenBuilder.profile = profile; + rewrittenBuilder.queryBuilder = queryBuilder; + rewrittenBuilder.rescoreBuilders = rescoreBuilders; + rewrittenBuilder.scriptFields = scriptFields; + rewrittenBuilder.searchAfterBuilder = searchAfterBuilder; + rewrittenBuilder.size = size; + rewrittenBuilder.sorts = sorts; + rewrittenBuilder.stats = stats; + rewrittenBuilder.suggestBuilder = suggestBuilder; + rewrittenBuilder.terminateAfter = terminateAfter; + rewrittenBuilder.timeoutInMillis = timeoutInMillis; + rewrittenBuilder.trackScores = trackScores; + rewrittenBuilder.version = version; + return rewrittenBuilder; + } + /** * Create a new SearchSourceBuilder with attributes set by an xContent. */ - public SearchSourceBuilder fromXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers) + public SearchSourceBuilder fromXContent(XContentParser parser, QueryParseContext context, + AggregatorParsers aggParsers, Suggesters suggesters) throws IOException { SearchSourceBuilder builder = new SearchSourceBuilder(); - builder.parseXContent(parser, context, aggParsers); + builder.parseXContent(parser, context, aggParsers, suggesters); return builder; } /** * Parse some xContent into this SearchSourceBuilder, overwriting any values specified in the xContent. Use this if you need to set up * different defaults than a regular SearchSourceBuilder would have and use - * {@link #fromXContent(XContentParser, QueryParseContext, AggregatorParsers)} if you have normal defaults. + * {@link #fromXContent(XContentParser, QueryParseContext, AggregatorParsers, Suggesters)} if you have normal defaults. 
*/ - public void parseXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers) throws IOException { + public void parseXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers, Suggesters suggesters) + throws IOException { + XContentParser.Token token = parser.currentToken(); String currentFieldName = null; if (token != XContentParser.Token.START_OBJECT && (token = parser.nextToken()) != XContentParser.Token.START_OBJECT) { @@ -852,8 +905,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); innerHitsBuilder = xContentBuilder.bytes(); } else if (context.parseFieldMatcher().match(currentFieldName, SUGGEST_FIELD)) { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); - suggestBuilder = xContentBuilder.bytes(); + suggestBuilder = SuggestBuilder.fromXContent(context, suggesters); } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) { sorts = new ArrayList<>(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); @@ -1050,10 +1102,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } if (suggestBuilder != null) { - builder.field(SUGGEST_FIELD.getPreferredName()); - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(suggestBuilder); - parser.nextToken(); - builder.copyCurrentStructure(parser); + builder.field(SUGGEST_FIELD.getPreferredName(), suggestBuilder); } if (rescoreBuilders != null) { @@ -1232,7 +1281,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ builder.stats = stats; } if (in.readBoolean()) { - builder.suggestBuilder = in.readBytesReference(); + builder.suggestBuilder = SuggestBuilder.PROTOTYPE.readFrom(in); } builder.terminateAfter = in.readVInt(); builder.timeoutInMillis = in.readLong(); @@ -1348,7 +1397,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ boolean hasSuggestBuilder = suggestBuilder != null; out.writeBoolean(hasSuggestBuilder); if (hasSuggestBuilder) { - out.writeBytesReference(suggestBuilder); + suggestBuilder.writeTo(out); } out.writeVInt(terminateAfter); out.writeLong(timeoutInMillis); diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java index ad8e71f5b93..d5d4607fba9 100644 --- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java @@ -76,7 +76,7 @@ public class SearchPhaseController extends AbstractComponent { public int compare(AtomicArray.Entry o1, AtomicArray.Entry o2) { int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index()); if (i == 0) { - i = o1.value.shardTarget().shardId() - o2.value.shardTarget().shardId(); + i = o1.value.shardTarget().shardId().id() - o2.value.shardTarget().shardId().id(); } return i; } @@ -386,7 +386,7 @@ public class SearchPhaseController extends AbstractComponent { Suggest.group(groupedSuggestions, shardResult); } - suggest = hasSuggestions ? new Suggest(Suggest.Fields.SUGGEST, Suggest.reduce(groupedSuggestions)) : null; + suggest = hasSuggestions ? 
new Suggest(Suggest.reduce(groupedSuggestions)) : null; } // merge addAggregation diff --git a/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java b/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java index fb0fc75299f..dbaee5b64bb 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java @@ -26,7 +26,6 @@ import org.elasticsearch.transport.TransportResponse; import java.io.IOException; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.fetch.QueryFetchSearchResult.readQueryFetchSearchResult; /** @@ -56,7 +55,7 @@ public class ScrollQueryFetchSearchResult extends TransportResponse { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardTarget = readSearchShardTarget(in); + shardTarget = new SearchShardTarget(in); result = readQueryFetchSearchResult(in); result.shardTarget(shardTarget); } diff --git a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java index 8ad24b5cb19..7a15f67dbd6 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java @@ -78,12 +78,12 @@ public final class CustomQueryScorer extends QueryScorer { Map terms) throws IOException { if (query instanceof FunctionScoreQuery) { query = ((FunctionScoreQuery) query).getSubQuery(); - extract(query, query.getBoost(), terms); + extract(query, 1F, terms); } else if (query instanceof FiltersFunctionScoreQuery) { query = ((FiltersFunctionScoreQuery) query).getSubQuery(); - extract(query, query.getBoost(), terms); + extract(query, 1F, terms); } else if (terms.isEmpty()) { - extractWeightedTerms(terms, query, query.getBoost()); + extractWeightedTerms(terms, query, 1F); } } diff --git a/core/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/FragmentBuilderHelper.java b/core/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/FragmentBuilderHelper.java index b3175e6c22a..b9ae34b60b0 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/FragmentBuilderHelper.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/vectorhighlight/FragmentBuilderHelper.java @@ -89,23 +89,12 @@ public final class FragmentBuilderHelper { } if (analyzer instanceof CustomAnalyzer) { final CustomAnalyzer a = (CustomAnalyzer) analyzer; - if (a.tokenizerFactory() instanceof EdgeNGramTokenizerFactory - || (a.tokenizerFactory() instanceof NGramTokenizerFactory - && !((NGramTokenizerFactory)a.tokenizerFactory()).version().onOrAfter(Version.LUCENE_4_2))) { - // ngram tokenizer is broken before 4.2 - return true; - } TokenFilterFactory[] tokenFilters = a.tokenFilters(); for (TokenFilterFactory tokenFilterFactory : tokenFilters) { if (tokenFilterFactory instanceof WordDelimiterTokenFilterFactory || tokenFilterFactory instanceof EdgeNGramTokenFilterFactory) { return true; } - if (tokenFilterFactory instanceof NGramTokenFilterFactory - && !((NGramTokenFilterFactory)tokenFilterFactory).version().onOrAfter(Version.LUCENE_4_2)) { - // ngram token filter is broken before 4.2 - return true; - } } } return false; diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java 
b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index 6c01a27442e..71a289331f8 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.internal; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Collector; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; @@ -50,6 +49,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; @@ -233,9 +233,6 @@ public class DefaultSearchContext extends SearchContext { Query result; if (Queries.isConstantMatchAllQuery(query())) { result = new ConstantScoreQuery(searchFilter); - if (query().getBoost() != AbstractQueryBuilder.DEFAULT_BOOST) { - result = new BoostQuery(result, query().getBoost()); - } } else { result = new BooleanQuery.Builder() .add(query, Occur.MUST) @@ -491,6 +488,11 @@ public class DefaultSearchContext extends SearchContext { return indexService.fieldData(); } + @Override + public PercolatorQueryCache percolatorQueryCache() { + return indexService.cache().getPercolatorQueryCache(); + } + @Override public long timeoutInMillis() { return timeoutInMillis; diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 83ea2b1ccd8..fedab3f9782 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; @@ -274,6 +275,11 @@ public abstract class FilteredSearchContext extends SearchContext { return in.fieldData(); } + @Override + public PercolatorQueryCache percolatorQueryCache() { + return in.percolatorQueryCache(); + } + @Override public long timeoutInMillis() { return in.timeoutInMillis(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java index c6afe325bb3..dcbcce503a4 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java @@ -55,7 +55,6 @@ import static java.util.Collections.singletonMap; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.lucene.Lucene.readExplanation; 
import static org.elasticsearch.common.lucene.Lucene.writeExplanation; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.highlight.HighlightField.readHighlightField; import static org.elasticsearch.search.internal.InternalSearchHitField.readSearchHitField; @@ -638,7 +637,7 @@ public class InternalSearchHit implements SearchHit { if (context.streamShardTarget() == ShardTargetType.STREAM) { if (in.readBoolean()) { - shard = readSearchShardTarget(in); + shard = new SearchShardTarget(in); } } else if (context.streamShardTarget() == ShardTargetType.LOOKUP) { int lookupId = in.readVInt(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java index 9e787cf2aa9..09d11e1a1a3 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java @@ -34,7 +34,6 @@ import java.util.IdentityHashMap; import java.util.Iterator; import java.util.Map; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.internal.InternalSearchHit.readSearchHit; /** @@ -216,7 +215,7 @@ public class InternalSearchHits implements SearchHits { // read the lookup table first int lookupSize = in.readVInt(); for (int i = 0; i < lookupSize; i++) { - context.handleShardLookup().put(in.readVInt(), readSearchShardTarget(in)); + context.handleShardLookup().put(in.readVInt(), new SearchShardTarget(in)); } } @@ -262,4 +261,4 @@ public class InternalSearchHits implements SearchHits { } } } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java index b8255e0bb52..1a2e1f70191 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java @@ -134,7 +134,7 @@ public class InternalSearchResponse implements Streamable, ToXContent { aggregations = InternalAggregations.readAggregations(in); } if (in.readBoolean()) { - suggest = Suggest.readSuggest(Suggest.Fields.SUGGEST, in); + suggest = Suggest.readSuggest(in); } timedOut = in.readBoolean(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 2b35e182161..ec47c6327cf 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -38,6 +38,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; @@ -217,6 +218,8 @@ public abstract class SearchContext implements Releasable { public abstract IndexFieldDataService fieldData(); + public abstract PercolatorQueryCache percolatorQueryCache(); + public abstract long timeoutInMillis(); public abstract void timeoutInMillis(long 
timeoutInMillis); diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 0f46461f4a2..31192350308 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Template; import org.elasticsearch.search.Scroll; @@ -58,8 +59,7 @@ import static org.elasticsearch.search.Scroll.readScroll; public class ShardSearchLocalRequest implements ShardSearchRequest { - private String index; - private int shardId; + private ShardId shardId; private int numberOfShards; private SearchType searchType; private Scroll scroll; @@ -97,8 +97,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types, Boolean requestCache) { - this.index = shardId.getIndexName(); - this.shardId = shardId.id(); + this.shardId = shardId; this.numberOfShards = numberOfShards; this.searchType = searchType; this.source = source; @@ -106,13 +105,9 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { this.requestCache = requestCache; } - @Override - public String index() { - return index; - } @Override - public int shardId() { + public ShardId shardId() { return shardId; } @@ -177,8 +172,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { @SuppressWarnings("unchecked") protected void innerReadFrom(StreamInput in) throws IOException { - index = in.readString(); - shardId = in.readVInt(); + shardId = ShardId.readShardId(in); searchType = SearchType.fromId(in.readByte()); numberOfShards = in.readVInt(); if (in.readBoolean()) { @@ -195,8 +189,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { } protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException { - out.writeString(index); - out.writeVInt(shardId); + shardId.writeTo(out); out.writeByte(searchType.id()); if (!asKey) { out.writeVInt(numberOfShards); @@ -232,4 +225,15 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { // we could potentially keep it without copying, but then pay the price of extra unused bytes up to a page return out.bytes().copyBytesArray(); } + + @Override + public void rewrite(QueryShardContext context) throws IOException { + SearchSourceBuilder source = this.source; + SearchSourceBuilder rewritten = null; + while (rewritten != source) { + rewritten = source.rewrite(context); + source = rewritten; + } + this.source = source; + } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 1f0b3d1f188..aa148e215c8 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -21,6 +21,8 @@ package org.elasticsearch.search.internal; import 
org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Template; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -34,9 +36,7 @@ import java.io.IOException; */ public interface ShardSearchRequest { - String index(); - - int shardId(); + ShardId shardId(); String[] types(); @@ -73,4 +73,10 @@ public interface ShardSearchRequest { * Returns the cache key for this shard search request, based on its content */ BytesReference cacheKey() throws IOException; + + /** + * Rewrites this request into its primitive form, e.g. by rewriting the + * QueryBuilder. + */ + void rewrite(QueryShardContext context) throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 48ea31c170a..cd6460a686f 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -28,6 +28,8 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Template; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -71,13 +73,9 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha return originalIndices.indicesOptions(); } - @Override - public String index() { - return shardSearchLocalRequest.index(); - } @Override - public int shardId() { + public ShardId shardId() { return shardSearchLocalRequest.shardId(); } @@ -159,4 +157,16 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha public boolean isProfile() { return shardSearchLocalRequest.isProfile(); } + + @Override + public void rewrite(QueryShardContext context) throws IOException { + shardSearchLocalRequest.rewrite(context); + } + + private ShardSearchTransportRequest shallowCopy(ShardSearchLocalRequest rewritten) { + ShardSearchTransportRequest newRequest = new ShardSearchTransportRequest(); + newRequest.originalIndices = originalIndices; + newRequest.shardSearchLocalRequest = rewritten; + return newRequest; + } } diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 9223eb5a82d..2b82633ebfd 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -207,7 +207,7 @@ public class QuerySearchResult extends QuerySearchResultProvider { this.pipelineAggregators = pipelineAggregators; } if (in.readBoolean()) { - suggest = Suggest.readSuggest(Suggest.Fields.SUGGEST, in); + suggest = Suggest.readSuggest(in); } searchTimedOut = in.readBoolean(); terminatedEarly = in.readOptionalBoolean(); diff --git a/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java
b/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java index ebb7615da44..bcdd94adf89 100644 --- a/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java @@ -26,7 +26,6 @@ import org.elasticsearch.transport.TransportResponse; import java.io.IOException; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.query.QuerySearchResult.readQuerySearchResult; /** @@ -56,7 +55,7 @@ public class ScrollQuerySearchResult extends TransportResponse { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardTarget = readSearchShardTarget(in); + shardTarget = new SearchShardTarget(in); queryResult = readQuerySearchResult(in); queryResult.shardTarget(shardTarget); } diff --git a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index c415fd5a70b..a5707ea4a53 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -19,36 +19,64 @@ package org.elasticsearch.search.sort; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryParseContext; import java.io.IOException; +import java.util.Objects; /** * A sort builder to sort based on a document field. */ -public class FieldSortBuilder extends SortBuilder { +public class FieldSortBuilder extends SortBuilder implements SortBuilderParser { + static final FieldSortBuilder PROTOTYPE = new FieldSortBuilder(""); + public static final String NAME = "field_sort"; + public static final ParseField NESTED_PATH = new ParseField("nested_path"); + public static final ParseField NESTED_FILTER = new ParseField("nested_filter"); + public static final ParseField MISSING = new ParseField("missing"); + public static final ParseField ORDER = new ParseField("order"); + public static final ParseField REVERSE = new ParseField("reverse"); + public static final ParseField SORT_MODE = new ParseField("mode"); + public static final ParseField UNMAPPED_TYPE = new ParseField("unmapped_type"); private final String fieldName; - private SortOrder order; - private Object missing; - private Boolean ignoreUnmapped; - private String unmappedType; - private String sortMode; + private SortMode sortMode; - private QueryBuilder nestedFilter; + private QueryBuilder nestedFilter; private String nestedPath; + /** Copy constructor. */ + public FieldSortBuilder(FieldSortBuilder template) { + this(template.fieldName); + this.order(template.order()); + this.missing(template.missing()); + this.unmappedType(template.unmappedType()); + if (template.sortMode != null) { + this.sortMode(template.sortMode()); + } + this.setNestedFilter(template.getNestedFilter()); + this.setNestedPath(template.getNestedPath()); + } + /** * Constructs a new sort based on a document field. * - * @param fieldName The field name. 
+ * @param fieldName + * The field name. */ public FieldSortBuilder(String fieldName) { if (fieldName == null) { @@ -57,42 +85,39 @@ public class FieldSortBuilder extends SortBuilder { this.fieldName = fieldName; } - /** - * The order of sorting. Defaults to {@link SortOrder#ASC}. - */ - @Override - public FieldSortBuilder order(SortOrder order) { - this.order = order; - return this; + /** Returns the document field this sort should be based on. */ + public String getFieldName() { + return this.fieldName; } /** * Sets the value when a field is missing in a doc. Can also be set to _last or * _first to sort missing last or first respectively. */ - @Override public FieldSortBuilder missing(Object missing) { - this.missing = missing; + if (missing instanceof String) { + this.missing = BytesRefs.toBytesRef(missing); + } else { + this.missing = missing; + } return this; } - /** - * Sets if the field does not exists in the index, it should be ignored and not sorted by or not. Defaults - * to false (not ignoring). - * @deprecated Use {@link #unmappedType(String)} instead. - */ - @Deprecated - public FieldSortBuilder ignoreUnmapped(boolean ignoreUnmapped) { - this.ignoreUnmapped = ignoreUnmapped; - return this; + /** Returns the value used when a field is missing in a doc. */ + public Object missing() { + if (missing instanceof BytesRef) { + return ((BytesRef) missing).utf8ToString(); + } + return missing; } /** * Set the type to use in case the current field is not mapped in an index. - * Specifying a type tells Elasticsearch what type the sort values should have, which is important - * for cross-index search, if there are sort fields that exist on some indices only. - * If the unmapped type is null then query execution will fail if one or more indices - * don't have a mapping for the current field. + * Specifying a type tells Elasticsearch what type the sort values should + * have, which is important for cross-index search, if there are sort fields + * that exist on some indices only. If the unmapped type is null + * then query execution will fail if one or more indices don't have a + * mapping for the current field. */ public FieldSortBuilder unmappedType(String type) { this.unmappedType = type; @@ -100,60 +125,244 @@ public class FieldSortBuilder extends SortBuilder { } /** - * Defines what values to pick in the case a document contains multiple values for the targeted sort field. - * Possible values: min, max, sum and avg + * Returns the type to use in case the current field is not mapped in an + * index. + */ + public String unmappedType() { + return this.unmappedType; + } + + /** + * Defines what values to pick in the case a document contains multiple + * values for the targeted sort field. Possible values: min, max, sum and + * avg + * *
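+ * (for example {@code sortMode(SortMode.AVG)})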

      * The last two values are only applicable for number based fields. */ - public FieldSortBuilder sortMode(String sortMode) { + public FieldSortBuilder sortMode(SortMode sortMode) { + Objects.requireNonNull(sortMode, "sort mode cannot be null"); this.sortMode = sortMode; return this; } /** - * Sets the nested filter that the nested objects should match with in order to be taken into account - * for sorting. + * Returns what values to pick in the case a document contains multiple + * values for the targeted sort field. + */ + public SortMode sortMode() { + return this.sortMode; + } + + /** + * Sets the nested filter that the nested objects should match with in order + * to be taken into account for sorting. + * + * TODO should the above getters and setters be deprecated/changed in + * favour of real getters and setters? */ public FieldSortBuilder setNestedFilter(QueryBuilder nestedFilter) { this.nestedFilter = nestedFilter; return this; } + /** + * Returns the nested filter that the nested objects should match with in + * order to be taken into account for sorting. + */ + public QueryBuilder getNestedFilter() { + return this.nestedFilter; + } /** - * Sets the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a - * field inside a nested object, the nearest upper nested object is selected as nested path. + * Sets the nested path if sorting occurs on a field that is inside a nested + * object. By default when sorting on a field inside a nested object, the + * nearest upper nested object is selected as nested path. */ public FieldSortBuilder setNestedPath(String nestedPath) { this.nestedPath = nestedPath; return this; } + /** + * Returns the nested path if sorting occurs on a field that is inside a + * nested object.
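+ * For example, for a sort built as {@code new FieldSortBuilder("user.age").setNestedPath("user")} this returns {@code "user"} (field names purely illustrative).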
+ */ + public String getNestedPath() { + return this.nestedPath; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(fieldName); - if (order != null) { - builder.field("order", order.toString()); - } + builder.field(ORDER_FIELD.getPreferredName(), order); if (missing != null) { - builder.field("missing", missing); - } - if (ignoreUnmapped != null) { - builder.field(SortParseElement.IGNORE_UNMAPPED.getPreferredName(), ignoreUnmapped); + if (missing instanceof BytesRef) { + builder.field(MISSING.getPreferredName(), ((BytesRef) missing).utf8ToString()); + } else { + builder.field(MISSING.getPreferredName(), missing); + } } if (unmappedType != null) { - builder.field(SortParseElement.UNMAPPED_TYPE.getPreferredName(), unmappedType); + builder.field(UNMAPPED_TYPE.getPreferredName(), unmappedType); } if (sortMode != null) { - builder.field("mode", sortMode); + builder.field(SORT_MODE.getPreferredName(), sortMode); } if (nestedFilter != null) { - builder.field("nested_filter", nestedFilter, params); + builder.field(NESTED_FILTER.getPreferredName(), nestedFilter, params); } if (nestedPath != null) { - builder.field("nested_path", nestedPath); + builder.field(NESTED_PATH.getPreferredName(), nestedPath); } builder.endObject(); return builder; } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + FieldSortBuilder builder = (FieldSortBuilder) other; + return (Objects.equals(this.fieldName, builder.fieldName) && Objects.equals(this.nestedFilter, builder.nestedFilter) + && Objects.equals(this.nestedPath, builder.nestedPath) && Objects.equals(this.missing, builder.missing) + && Objects.equals(this.order, builder.order) && Objects.equals(this.sortMode, builder.sortMode) + && Objects.equals(this.unmappedType, builder.unmappedType)); + } + + @Override + public int hashCode() { + return Objects.hash(this.fieldName, this.nestedFilter, this.nestedPath, this.missing, this.order, this.sortMode, this.unmappedType); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.fieldName); + if (this.nestedFilter != null) { + out.writeBoolean(true); + out.writeQuery(this.nestedFilter); + } else { + out.writeBoolean(false); + } + out.writeOptionalString(this.nestedPath); + out.writeGenericValue(this.missing); + + if (this.order != null) { + out.writeBoolean(true); + this.order.writeTo(out); + } else { + out.writeBoolean(false); + } + + out.writeBoolean(this.sortMode != null); + if (this.sortMode != null) { + this.sortMode.writeTo(out); + } + out.writeOptionalString(this.unmappedType); + } + + @Override + public FieldSortBuilder readFrom(StreamInput in) throws IOException { + String fieldName = in.readString(); + FieldSortBuilder result = new FieldSortBuilder(fieldName); + if (in.readBoolean()) { + QueryBuilder query = in.readQuery(); + result.setNestedFilter(query); + } + result.setNestedPath(in.readOptionalString()); + result.missing(in.readGenericValue()); + + if (in.readBoolean()) { + result.order(SortOrder.readOrderFrom(in)); + } + if (in.readBoolean()) { + result.sortMode(SortMode.PROTOTYPE.readFrom(in)); + } + result.unmappedType(in.readOptionalString()); + return result; + } + + @Override + public FieldSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException { + 
XContentParser parser = context.parser(); + + QueryBuilder nestedFilter = null; + String nestedPath = null; + Object missing = null; + SortOrder order = null; + SortMode sortMode = null; + String unmappedType = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if (context.parseFieldMatcher().match(currentFieldName, NESTED_FILTER)) { + nestedFilter = context.parseInnerQueryBuilder(); + } else { + throw new ParsingException(parser.getTokenLocation(), "Expected " + NESTED_FILTER.getPreferredName() + " element."); + } + } else if (token.isValue()) { + if (context.parseFieldMatcher().match(currentFieldName, NESTED_PATH)) { + nestedPath = parser.text(); + } else if (context.parseFieldMatcher().match(currentFieldName, MISSING)) { + missing = parser.objectBytes(); + } else if (context.parseFieldMatcher().match(currentFieldName, REVERSE)) { + if (parser.booleanValue()) { + order = SortOrder.DESC; + } + // else we keep the default ASC + } else if (context.parseFieldMatcher().match(currentFieldName, ORDER)) { + String sortOrder = parser.text(); + if ("asc".equals(sortOrder)) { + order = SortOrder.ASC; + } else if ("desc".equals(sortOrder)) { + order = SortOrder.DESC; + } else { + throw new IllegalStateException("Sort order " + sortOrder + " not supported."); + } + } else if (context.parseFieldMatcher().match(currentFieldName, SORT_MODE)) { + sortMode = SortMode.fromString(parser.text()); + } else if (context.parseFieldMatcher().match(currentFieldName, UNMAPPED_TYPE)) { + unmappedType = parser.text(); + } else { + throw new IllegalArgumentException("Option " + currentFieldName + " not supported."); + } + } + } + + FieldSortBuilder builder = new FieldSortBuilder(fieldName); + if (nestedFilter != null) { + builder.setNestedFilter(nestedFilter); + } + if (nestedPath != null) { + builder.setNestedPath(nestedPath); + } + if (missing != null) { + builder.missing(missing); + } + if (order != null) { + builder.order(order); + } + if (sortMode != null) { + builder.sortMode(sortMode); + } + if (unmappedType != null) { + builder.unmappedType(unmappedType); + } + return builder; + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index e37eed61c6d..9785a0fc240 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -23,16 +23,13 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; -import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.DistanceUnit; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.search.MultiValueMode; import java.io.IOException; import java.util.ArrayList; @@ -44,8 +41,7 
@@ import java.util.Objects; /** * A geo distance based sorting on a geo point like field. */ -public class GeoDistanceSortBuilder extends SortBuilder - implements ToXContent, NamedWriteable, SortElementParserTemp { +public class GeoDistanceSortBuilder extends SortBuilder implements SortBuilderParser { public static final String NAME = "_geo_distance"; public static final boolean DEFAULT_COERCE = false; public static final boolean DEFAULT_IGNORE_MALFORMED = false; @@ -57,14 +53,12 @@ public class GeoDistanceSortBuilder extends SortBuilder private GeoDistance geoDistance = GeoDistance.DEFAULT; private DistanceUnit unit = DistanceUnit.DEFAULT; - private SortOrder order = SortOrder.ASC; - - // TODO there is an enum that covers that parameter which we should be using here - private String sortMode = null; + + private SortMode sortMode = null; @SuppressWarnings("rawtypes") private QueryBuilder nestedFilter; private String nestedPath; - + // TODO switch to GeoValidationMethod enum private boolean coerce = DEFAULT_COERCE; private boolean ignoreMalformed = DEFAULT_IGNORE_MALFORMED; @@ -109,7 +103,7 @@ public class GeoDistanceSortBuilder extends SortBuilder } this.fieldName = fieldName; } - + /** * Copy constructor. * */ @@ -125,7 +119,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.coerce = original.coerce; this.ignoreMalformed = original.ignoreMalformed; } - + /** * Returns the geo point like field the distance based sort operates on. * */ @@ -153,7 +147,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.points.addAll(Arrays.asList(points)); return this; } - + /** * Returns the points to create the range distance facets from. */ @@ -163,7 +157,7 @@ public class GeoDistanceSortBuilder extends SortBuilder /** * The geohash of the geo point to create the range distance facets from. - * + * * Deprecated - please use points(GeoPoint... points) instead. */ @Deprecated @@ -173,7 +167,7 @@ public class GeoDistanceSortBuilder extends SortBuilder } return this; } - + /** * The geo distance type used to compute the distance. */ @@ -181,7 +175,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.geoDistance = geoDistance; return this; } - + /** * Returns the geo distance type used to compute the distance. */ @@ -204,37 +198,13 @@ public class GeoDistanceSortBuilder extends SortBuilder return this.unit; } - /** - * The order of sorting. Defaults to {@link SortOrder#ASC}. - */ - @Override - public GeoDistanceSortBuilder order(SortOrder order) { - this.order = order; - return this; - } - - /** Returns the order of sorting. */ - public SortOrder order() { - return this.order; - } - - /** - * Not relevant. - * - * TODO should this throw an exception rather than silently ignore a parameter that is not used? - */ - @Override - public GeoDistanceSortBuilder missing(Object missing) { - return this; - } - /** * Defines which distance to use for sorting in the case a document contains multiple geo points. 
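 * (for example {@code sortMode(SortMode.MIN)} to sort each document by its nearest point)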
* Possible values: min and max */ - public GeoDistanceSortBuilder sortMode(String sortMode) { - MultiValueMode temp = MultiValueMode.fromString(sortMode); - if (temp == MultiValueMode.SUM) { + public GeoDistanceSortBuilder sortMode(SortMode sortMode) { + Objects.requireNonNull(sortMode, "sort mode cannot be null"); + if (sortMode == SortMode.SUM) { throw new IllegalArgumentException("sort_mode [sum] isn't supported for sorting by geo distance"); } this.sortMode = sortMode; @@ -242,7 +212,7 @@ public class GeoDistanceSortBuilder extends SortBuilder } /** Returns which distance to use for sorting in the case a document contains multiple geo points. */ - public String sortMode() { + public SortMode sortMode() { return this.sortMode; } @@ -250,16 +220,16 @@ public class GeoDistanceSortBuilder extends SortBuilder * Sets the nested filter that the nested objects should match with in order to be taken into account * for sorting. */ - public GeoDistanceSortBuilder setNestedFilter(QueryBuilder nestedFilter) { + public GeoDistanceSortBuilder setNestedFilter(QueryBuilder nestedFilter) { this.nestedFilter = nestedFilter; return this; } - /** + /** * Returns the nested filter that the nested objects should match with in order to be taken into account - * for sorting. + * for sorting. **/ - public QueryBuilder getNestedFilter() { + public QueryBuilder getNestedFilter() { return this.nestedFilter; } @@ -271,7 +241,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.nestedPath = nestedPath; return this; } - + /** * Returns the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a * field inside a nested object, the nearest upper nested object is selected as nested path. @@ -295,7 +265,7 @@ public class GeoDistanceSortBuilder extends SortBuilder } return this; } - + public boolean ignoreMalformed() { return this.ignoreMalformed; } @@ -312,11 +282,7 @@ public class GeoDistanceSortBuilder extends SortBuilder builder.field("unit", unit); builder.field("distance_type", geoDistance.name().toLowerCase(Locale.ROOT)); - if (order == SortOrder.DESC) { - builder.field("reverse", true); - } else { - builder.field("reverse", false); - } + builder.field(ORDER_FIELD.getPreferredName(), order); if (sortMode != null) { builder.field("mode", sortMode); @@ -373,11 +339,14 @@ public class GeoDistanceSortBuilder extends SortBuilder public void writeTo(StreamOutput out) throws IOException { out.writeString(fieldName); out.writeGenericValue(points); - + geoDistance.writeTo(out); unit.writeTo(out); order.writeTo(out); - out.writeOptionalString(sortMode); + out.writeBoolean(this.sortMode != null); + if (this.sortMode != null) { + sortMode.writeTo(out); + } if (nestedFilter != null) { out.writeBoolean(true); out.writeQuery(nestedFilter); @@ -392,16 +361,15 @@ public class GeoDistanceSortBuilder extends SortBuilder @Override public GeoDistanceSortBuilder readFrom(StreamInput in) throws IOException { String fieldName = in.readString(); - - ArrayList points = (ArrayList) in.readGenericValue(); + + ArrayList points = (ArrayList) in.readGenericValue(); GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, points.toArray(new GeoPoint[points.size()])); - + result.geoDistance(GeoDistance.readGeoDistanceFrom(in)); result.unit(DistanceUnit.readDistanceUnit(in)); result.order(SortOrder.readOrderFrom(in)); - String sortMode = in.readOptionalString(); - if (sortMode != null) { - result.sortMode(sortMode); + if (in.readBoolean()) { + result.sortMode = 
SortMode.PROTOTYPE.readFrom(in); } if (in.readBoolean()) { result.setNestedFilter(in.readQuery()); @@ -419,9 +387,9 @@ public class GeoDistanceSortBuilder extends SortBuilder List geoPoints = new ArrayList<>(); DistanceUnit unit = DistanceUnit.DEFAULT; GeoDistance geoDistance = GeoDistance.DEFAULT; - boolean reverse = false; - MultiValueMode sortMode = null; - QueryBuilder nestedFilter = null; + SortOrder order = SortOrder.ASC; + SortMode sortMode = null; + QueryBuilder nestedFilter = null; String nestedPath = null; boolean coerce = GeoDistanceSortBuilder.DEFAULT_COERCE; @@ -439,8 +407,8 @@ public class GeoDistanceSortBuilder extends SortBuilder } else if (token == XContentParser.Token.START_OBJECT) { // the json in the format of -> field : { lat : 30, lon : 12 } if ("nested_filter".equals(currentName) || "nestedFilter".equals(currentName)) { - // TODO Note to remember: while this is kept as a QueryBuilder internally, - // we need to make sure to call toFilter() on it once on the shard + // TODO Note to remember: while this is kept as a QueryBuilder internally, + // we need to make sure to call toFilter() on it once on the shard // (e.g. in the new build() method) nestedFilter = context.parseInnerQueryBuilder(); } else { @@ -451,9 +419,9 @@ public class GeoDistanceSortBuilder extends SortBuilder } } else if (token.isValue()) { if ("reverse".equals(currentName)) { - reverse = parser.booleanValue(); + order = parser.booleanValue() ? SortOrder.DESC : SortOrder.ASC; } else if ("order".equals(currentName)) { - reverse = "desc".equals(parser.text()); + order = SortOrder.fromString(parser.text()); } else if ("unit".equals(currentName)) { unit = DistanceUnit.fromString(parser.text()); } else if ("distance_type".equals(currentName) || "distanceType".equals(currentName)) { @@ -469,7 +437,7 @@ public class GeoDistanceSortBuilder extends SortBuilder ignoreMalformed = ignore_malformed_value; } } else if ("sort_mode".equals(currentName) || "sortMode".equals(currentName) || "mode".equals(currentName)) { - sortMode = MultiValueMode.fromString(parser.text()); + sortMode = SortMode.fromString(parser.text()); } else if ("nested_path".equals(currentName) || "nestedPath".equals(currentName)) { nestedPath = parser.text(); } else { @@ -484,13 +452,9 @@ public class GeoDistanceSortBuilder extends SortBuilder GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, geoPoints.toArray(new GeoPoint[geoPoints.size()])); result.geoDistance(geoDistance); result.unit(unit); - if (reverse) { - result.order(SortOrder.DESC); - } else { - result.order(SortOrder.ASC); - } + result.order(order); if (sortMode != null) { - result.sortMode(sortMode.name()); + result.sortMode(sortMode); } result.setNestedFilter(nestedFilter); result.setNestedPath(nestedPath); diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java index 27c8b8e0ed5..d1eabf89e45 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java @@ -43,9 +43,9 @@ import org.elasticsearch.index.fielddata.MultiGeoPointValues; import org.elasticsearch.index.fielddata.NumericDoubleValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; import 
org.elasticsearch.search.MultiValueMode; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; @@ -62,7 +62,7 @@ public class GeoDistanceSortParser implements SortParser { } @Override - public SortField parse(XContentParser parser, SearchContext context) throws Exception { + public SortField parse(XContentParser parser, QueryShardContext context) throws Exception { String fieldName = null; List geoPoints = new ArrayList<>(); DistanceUnit unit = DistanceUnit.DEFAULT; @@ -71,7 +71,7 @@ public class GeoDistanceSortParser implements SortParser { MultiValueMode sortMode = null; NestedInnerQueryParseSupport nestedHelper = null; - final boolean indexCreatedBeforeV2_0 = context.indexShard().indexSettings().getIndexVersionCreated().before(Version.V_2_0_0); + final boolean indexCreatedBeforeV2_0 = context.indexVersionCreated().before(Version.V_2_0_0); boolean coerce = GeoDistanceSortBuilder.DEFAULT_COERCE; boolean ignoreMalformed = GeoDistanceSortBuilder.DEFAULT_IGNORE_MALFORMED; @@ -111,8 +111,11 @@ public class GeoDistanceSortParser implements SortParser { if (coerce == true) { ignoreMalformed = true; } - } else if ("ignore_malformed".equals(currentName) && coerce == false) { - ignoreMalformed = parser.booleanValue(); + } else if ("ignore_malformed".equals(currentName)) { + boolean ignoreMalformedFlag = parser.booleanValue(); + if (coerce == false) { + ignoreMalformed = ignoreMalformedFlag; + } } else if ("sort_mode".equals(currentName) || "sortMode".equals(currentName) || "mode".equals(currentName)) { sortMode = MultiValueMode.fromString(parser.text()); } else if ("nested_path".equals(currentName) || "nestedPath".equals(currentName)) { @@ -155,12 +158,12 @@ public class GeoDistanceSortParser implements SortParser { throw new IllegalArgumentException("sort_mode [sum] isn't supported for sorting by geo distance"); } - MappedFieldType fieldType = context.smartNameFieldType(fieldName); + MappedFieldType fieldType = context.fieldMapper(fieldName); if (fieldType == null) { throw new IllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort"); } final MultiValueMode finalSortMode = sortMode; // final reference for use in the anonymous class - final IndexGeoPointFieldData geoIndexFieldData = context.fieldData().getForField(fieldType); + final IndexGeoPointFieldData geoIndexFieldData = context.getForField(fieldType); final FixedSourceDistance[] distances = new FixedSourceDistance[geoPoints.size()]; for (int i = 0; i< geoPoints.size(); i++) { distances[i] = geoDistance.fixedSourceDistance(geoPoints.get(i).lat(), geoPoints.get(i).lon(), unit); @@ -168,15 +171,16 @@ public class GeoDistanceSortParser implements SortParser { final Nested nested; if (nestedHelper != null && nestedHelper.getPath() != null) { - BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()); - Query innerDocumentsFilter; + BitSetProducer rootDocumentsFilter = context.bitsetFilter(Queries.newNonNestedFilter()); + Query innerDocumentsQuery; if (nestedHelper.filterFound()) { // TODO: use queries instead - innerDocumentsFilter = nestedHelper.getInnerFilter(); + innerDocumentsQuery = nestedHelper.getInnerFilter(); } else { - innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); + innerDocumentsQuery = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } - nested = new Nested(rootDocumentsFilter, 
context.searcher().createNormalizedWeight(innerDocumentsFilter, false)); + + nested = new Nested(rootDocumentsFilter, innerDocumentsQuery); } else { nested = null; } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java index 7435ff95f45..76ca56f0f9f 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java @@ -19,40 +19,101 @@ package org.elasticsearch.search.sort; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryParseContext; import java.io.IOException; +import java.util.Objects; /** * A sort builder allowing to sort by score. - * - * */ -public class ScoreSortBuilder extends SortBuilder { +public class ScoreSortBuilder extends SortBuilder implements SortBuilderParser { - private SortOrder order; + private static final String NAME = "_score"; + static final ScoreSortBuilder PROTOTYPE = new ScoreSortBuilder(); + public static final ParseField REVERSE_FIELD = new ParseField("reverse"); + public static final ParseField ORDER_FIELD = new ParseField("order"); - /** - * The order of sort scoring. By default, its {@link SortOrder#DESC}. - */ - @Override - public ScoreSortBuilder order(SortOrder order) { - this.order = order; - return this; + public ScoreSortBuilder() { + // order defaults to desc when sorting on the _score + order(SortOrder.DESC); } - @Override - public SortBuilder missing(Object missing) { - return this; - } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject("_score"); - if (order == SortOrder.ASC) { - builder.field("reverse", true); - } + builder.startObject(NAME); + builder.field(ORDER_FIELD.getPreferredName(), order); builder.endObject(); return builder; } + + @Override + public ScoreSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException { + XContentParser parser = context.parser(); + ParseFieldMatcher matcher = context.parseFieldMatcher(); + + XContentParser.Token token; + String currentName = parser.currentName(); + ScoreSortBuilder result = new ScoreSortBuilder(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentName = parser.currentName(); + } else if (token.isValue()) { + if (matcher.match(currentName, REVERSE_FIELD)) { + if (parser.booleanValue()) { + result.order(SortOrder.ASC); + } + // else we keep the default DESC + } else if (matcher.match(currentName, ORDER_FIELD)) { + result.order(SortOrder.fromString(parser.text())); + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] unexpected token [" + token + "]"); + } + } + return result; + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object == null || getClass() != object.getClass()) { + return false; + } + ScoreSortBuilder other = 
(ScoreSortBuilder) object; + return Objects.equals(order, other.order); + } + + @Override + public int hashCode() { + return Objects.hash(this.order); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + order.writeTo(out); + } + + @Override + public ScoreSortBuilder readFrom(StreamInput in) throws IOException { + ScoreSortBuilder builder = new ScoreSortBuilder().order(SortOrder.readOrderFrom(in)); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index e9a9c8df57c..e77d12ce478 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -19,26 +19,48 @@ package org.elasticsearch.search.sort; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.script.Script; +import org.elasticsearch.script.Script.ScriptField; +import org.elasticsearch.script.ScriptParameterParser; +import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; import java.io.IOException; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; /** * Script sort builder allows to sort based on a custom script expression. */ -public class ScriptSortBuilder extends SortBuilder { +public class ScriptSortBuilder extends SortBuilder implements SortBuilderParser { - private Script script; + private static final String NAME = "_script"; + static final ScriptSortBuilder PROTOTYPE = new ScriptSortBuilder(new Script("_na_"), ScriptSortType.STRING); + public static final ParseField TYPE_FIELD = new ParseField("type"); + public static final ParseField SCRIPT_FIELD = new ParseField("script"); + public static final ParseField SORTMODE_FIELD = new ParseField("mode"); + public static final ParseField NESTED_PATH_FIELD = new ParseField("nested_path"); + public static final ParseField NESTED_FILTER_FIELD = new ParseField("nested_filter"); + public static final ParseField PARAMS_FIELD = new ParseField("params"); - private final String type; + private final Script script; - private SortOrder order; + private ScriptSortType type; - private String sortMode; + private SortMode sortMode; - private QueryBuilder nestedFilter; + private QueryBuilder nestedFilter; private String nestedPath; @@ -47,47 +69,73 @@ public class ScriptSortBuilder extends SortBuilder { * * @param script * The script to use. + * @param type + * The type of the script, can be either {@link ScriptSortType#STRING} or + * {@link ScriptSortType#NUMBER} */ - public ScriptSortBuilder(Script script, String type) { + public ScriptSortBuilder(Script script, ScriptSortType type) { + Objects.requireNonNull(script, "script cannot be null"); + Objects.requireNonNull(type, "type cannot be null"); this.script = script; this.type = type; } - /** - * Sets the sort order. 
- */ - @Override - public ScriptSortBuilder order(SortOrder order) { - this.order = order; - return this; + ScriptSortBuilder(ScriptSortBuilder original) { + this.script = original.script; + this.type = original.type; + this.order = original.order; + this.sortMode = original.sortMode; + this.nestedFilter = original.nestedFilter; + this.nestedPath = original.nestedPath; } /** - * Not really relevant. + * Get the script used in this sort. */ - @Override - public SortBuilder missing(Object missing) { - return this; + public Script script() { + return this.script; + } + + /** + * Get the type used in this sort. + */ + public ScriptSortType type() { + return this.type; } /** * Defines which distance to use for sorting in the case a document contains multiple geo points. * Possible values: min and max */ - public ScriptSortBuilder sortMode(String sortMode) { + public ScriptSortBuilder sortMode(SortMode sortMode) { + Objects.requireNonNull(sortMode, "sort mode cannot be null."); this.sortMode = sortMode; return this; } + /** + * Get the sort mode. + */ + public SortMode sortMode() { + return this.sortMode; + } + /** * Sets the nested filter that the nested objects should match with in order to be taken into account * for sorting. */ - public ScriptSortBuilder setNestedFilter(QueryBuilder nestedFilter) { + public ScriptSortBuilder setNestedFilter(QueryBuilder nestedFilter) { this.nestedFilter = nestedFilter; return this; } + /** + * Gets the nested filter. + */ + public QueryBuilder getNestedFilter() { + return this.nestedFilter; + } + /** * Sets the nested path if sorting occurs on a field that is inside a nested object. For sorting by script this * needs to be specified. @@ -97,24 +145,200 @@ public class ScriptSortBuilder extends SortBuilder { return this; } + /** + * Gets the nested path. 
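+ * Returns {@code null} when no nested path was set.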
+ */ + public String getNestedPath() { + return this.nestedPath; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params builderParams) throws IOException { - builder.startObject("_script"); - builder.field("script", script); - builder.field("type", type); - if (order == SortOrder.DESC) { - builder.field("reverse", true); - } + builder.startObject(NAME); + builder.field(SCRIPT_FIELD.getPreferredName(), script); + builder.field(TYPE_FIELD.getPreferredName(), type); + builder.field(ORDER_FIELD.getPreferredName(), order); if (sortMode != null) { - builder.field("mode", sortMode); + builder.field(SORTMODE_FIELD.getPreferredName(), sortMode); } if (nestedPath != null) { - builder.field("nested_path", nestedPath); + builder.field(NESTED_PATH_FIELD.getPreferredName(), nestedPath); } if (nestedFilter != null) { - builder.field("nested_filter", nestedFilter, builderParams); + builder.field(NESTED_FILTER_FIELD.getPreferredName(), nestedFilter, builderParams); } builder.endObject(); return builder; } + + @Override + public ScriptSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException { + ScriptParameterParser scriptParameterParser = new ScriptParameterParser(); + XContentParser parser = context.parser(); + ParseFieldMatcher parseField = context.parseFieldMatcher(); + Script script = null; + ScriptSortType type = null; + SortMode sortMode = null; + SortOrder order = null; + QueryBuilder nestedFilter = null; + String nestedPath = null; + Map params = new HashMap<>(); + + XContentParser.Token token; + String currentName = parser.currentName(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if (parseField.match(currentName, ScriptField.SCRIPT)) { + script = Script.parse(parser, parseField); + } else if (parseField.match(currentName, PARAMS_FIELD)) { + params = parser.map(); + } else if (parseField.match(currentName, NESTED_FILTER_FIELD)) { + nestedFilter = context.parseInnerQueryBuilder(); + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]"); + } + } else if (token.isValue()) { + if (parseField.match(currentName, ORDER_FIELD)) { + order = SortOrder.fromString(parser.text()); + } else if (scriptParameterParser.token(currentName, token, parser, parseField)) { + // Do Nothing (handled by ScriptParameterParser + } else if (parseField.match(currentName, TYPE_FIELD)) { + type = ScriptSortType.fromString(parser.text()); + } else if (parseField.match(currentName, SORTMODE_FIELD)) { + sortMode = SortMode.fromString(parser.text()); + } else if (parseField.match(currentName, NESTED_PATH_FIELD)) { + nestedPath = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] unexpected token [" + token + "]"); + } + } + + if (script == null) { // Didn't find anything using the new API so try using the old one instead + ScriptParameterValue scriptValue = scriptParameterParser.getDefaultScriptParameterValue(); + if (scriptValue != null) { + if (params == null) { + params = new HashMap<>(); + } + script = new Script(scriptValue.script(), scriptValue.scriptType(), scriptParameterParser.lang(), params); + } + } + + ScriptSortBuilder result = new 
ScriptSortBuilder(script, type); + if (order != null) { + result.order(order); + } + if (sortMode != null) { + result.sortMode(sortMode); + } + if (nestedFilter != null) { + result.setNestedFilter(nestedFilter); + } + if (nestedPath != null) { + result.setNestedPath(nestedPath); + } + return result; + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object == null || getClass() != object.getClass()) { + return false; + } + ScriptSortBuilder other = (ScriptSortBuilder) object; + return Objects.equals(script, other.script) && + Objects.equals(type, other.type) && + Objects.equals(order, other.order) && + Objects.equals(sortMode, other.sortMode) && + Objects.equals(nestedFilter, other.nestedFilter) && + Objects.equals(nestedPath, other.nestedPath); + } + + @Override + public int hashCode() { + return Objects.hash(script, type, order, sortMode, nestedFilter, nestedPath); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + script.writeTo(out); + type.writeTo(out); + order.writeTo(out); + out.writeBoolean(sortMode != null); + if (sortMode != null) { + sortMode.writeTo(out); + } + out.writeOptionalString(nestedPath); + boolean hasNestedFilter = nestedFilter != null; + out.writeBoolean(hasNestedFilter); + if (hasNestedFilter) { + out.writeQuery(nestedFilter); + } + } + + @Override + public ScriptSortBuilder readFrom(StreamInput in) throws IOException { + ScriptSortBuilder builder = new ScriptSortBuilder(Script.readScript(in), ScriptSortType.PROTOTYPE.readFrom(in)); + builder.order(SortOrder.readOrderFrom(in)); + if (in.readBoolean()) { + builder.sortMode(SortMode.PROTOTYPE.readFrom(in)); + } + builder.nestedPath = in.readOptionalString(); + if (in.readBoolean()) { + builder.nestedFilter = in.readQuery(); + } + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + public enum ScriptSortType implements Writeable { + /** script sort for a string value **/ + STRING, + /** script sort for a numeric value **/ + NUMBER; + + static ScriptSortType PROTOTYPE = STRING; + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeVInt(ordinal()); + } + + @Override + public ScriptSortType readFrom(final StreamInput in) throws IOException { + int ordinal = in.readVInt(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IOException("Unknown ScriptSortType ordinal [" + ordinal + "]"); + } + return values()[ordinal]; + } + + public static ScriptSortType fromString(final String str) { + Objects.requireNonNull(str, "input string is null"); + switch (str.toLowerCase(Locale.ROOT)) { + case ("string"): + return ScriptSortType.STRING; + case ("number"): + return ScriptSortType.NUMBER; + default: + throw new IllegalArgumentException("Unknown ScriptSortType [" + str + "]"); + } + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java index e4fe2c08f75..c238ad6ccaf 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import 
org.elasticsearch.common.ParsingException; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.FieldData; @@ -37,6 +38,7 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.Script; @@ -46,8 +48,7 @@ import org.elasticsearch.script.ScriptParameterParser; import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.MultiValueMode; -import org.elasticsearch.search.SearchParseException; -import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; import java.io.IOException; import java.util.Collections; @@ -59,19 +60,16 @@ import java.util.Map; */ public class ScriptSortParser implements SortParser { - private static final String STRING_SORT_TYPE = "string"; - private static final String NUMBER_SORT_TYPE = "number"; - @Override public String[] names() { return new String[]{"_script"}; } @Override - public SortField parse(XContentParser parser, SearchContext context) throws Exception { + public SortField parse(XContentParser parser, QueryShardContext context) throws Exception { ScriptParameterParser scriptParameterParser = new ScriptParameterParser(); Script script = null; - String type = null; + ScriptSortType type = null; Map params = null; boolean reverse = false; MultiValueMode sortMode = null; @@ -101,7 +99,7 @@ public class ScriptSortParser implements SortParser { } else if (scriptParameterParser.token(currentName, token, parser, context.parseFieldMatcher())) { // Do Nothing (handled by ScriptParameterParser } else if ("type".equals(currentName)) { - type = parser.text(); + type = ScriptSortType.fromString(parser.text()); } else if ("mode".equals(currentName)) { sortMode = MultiValueMode.fromString(parser.text()); } else if ("nested_path".equals(currentName) || "nestedPath".equals(currentName)) { @@ -122,19 +120,20 @@ public class ScriptSortParser implements SortParser { script = new Script(scriptValue.script(), scriptValue.scriptType(), scriptParameterParser.lang(), params); } } else if (params != null) { - throw new SearchParseException(context, "script params must be specified inside script object", parser.getTokenLocation()); + throw new ParsingException(parser.getTokenLocation(), "script params must be specified inside script object"); } if (script == null) { - throw new SearchParseException(context, "_script sorting requires setting the script to sort by", parser.getTokenLocation()); + throw new ParsingException(parser.getTokenLocation(), "_script sorting requires setting the script to sort by"); } if (type == null) { - throw new SearchParseException(context, "_script sorting requires setting the type of the script", parser.getTokenLocation()); + throw new ParsingException(parser.getTokenLocation(), "_script sorting requires setting the type of the script"); } - final SearchScript searchScript = context.scriptService().search(context.lookup(), script, 
ScriptContext.Standard.SEARCH, Collections.emptyMap()); + final SearchScript searchScript = context.getScriptService().search( + context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); - if (STRING_SORT_TYPE.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) { - throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]", parser.getTokenLocation()); + if (ScriptSortType.STRING.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) { + throw new ParsingException(parser.getTokenLocation(), "type [string] doesn't support mode [" + sortMode + "]"); } if (sortMode == null) { @@ -144,7 +143,7 @@ public class ScriptSortParser implements SortParser { // If nested_path is specified, then wrap the `fieldComparatorSource` in a `NestedFieldComparatorSource` final Nested nested; if (nestedHelper != null && nestedHelper.getPath() != null) { - BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()); + BitSetProducer rootDocumentsFilter = context.bitsetFilter(Queries.newNonNestedFilter()); Query innerDocumentsFilter; if (nestedHelper.filterFound()) { // TODO: use queries instead @@ -152,14 +151,14 @@ public class ScriptSortParser implements SortParser { } else { innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } - nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false)); + nested = new Nested(rootDocumentsFilter, innerDocumentsFilter); } else { nested = null; } final IndexFieldData.XFieldComparatorSource fieldComparatorSource; switch (type) { - case STRING_SORT_TYPE: + case STRING: fieldComparatorSource = new BytesRefFieldComparatorSource(null, null, sortMode, nested) { LeafSearchScript leafScript; @Override @@ -182,7 +181,7 @@ public class ScriptSortParser implements SortParser { } }; break; - case NUMBER_SORT_TYPE: + case NUMBER: // TODO: should we rather sort missing values last? 
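// Note: Double.MAX_VALUE is passed as the missing value below, so documents without a value sort last in ascending order.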
fieldComparatorSource = new DoubleValuesComparatorSource(null, Double.MAX_VALUE, sortMode, nested) { LeafSearchScript leafScript; @@ -205,7 +204,7 @@ public class ScriptSortParser implements SortParser { }; break; default: - throw new SearchParseException(context, "custom script sort type [" + type + "] not supported", parser.getTokenLocation()); + throw new ParsingException(parser.getTokenLocation(), "custom script sort type [" + type + "] not supported"); } return new SortField("_script", fieldComparatorSource, reverse); diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java index da80506dde2..7852af4e97e 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java @@ -20,14 +20,20 @@ package org.elasticsearch.search.sort; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import java.util.Objects; + /** * */ -public abstract class SortBuilder implements ToXContent { +public abstract class SortBuilder> implements ToXContent { + + protected SortOrder order = SortOrder.ASC; + public static final ParseField ORDER_FIELD = new ParseField("order"); @Override public String toString() { @@ -42,13 +48,19 @@ public abstract class SortBuilder implements ToXContent { } /** - * The order of sorting. Defaults to {@link SortOrder#ASC}. + * Set the order of sorting. */ - public abstract SortBuilder order(SortOrder order); + @SuppressWarnings("unchecked") + public T order(SortOrder order) { + Objects.requireNonNull(order, "sort order cannot be null."); + this.order = order; + return (T) this; + } /** - * Sets the value when a field is missing in a doc. Can also be set to _last or - * _first to sort missing last or first respectively. + * Return the {@link SortOrder} used for this {@link SortBuilder}. 
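+ * Defaults to {@link SortOrder#ASC}.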
*/ - public abstract SortBuilder missing(Object missing); + public SortOrder order() { + return this.order; + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java b/core/src/main/java/org/elasticsearch/search/sort/SortBuilderParser.java similarity index 79% rename from core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java rename to core/src/main/java/org/elasticsearch/search/sort/SortBuilderParser.java index 8893471b6c1..90d54a50121 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortBuilderParser.java @@ -19,15 +19,15 @@ package org.elasticsearch.search.sort; +import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.index.query.QueryParseContext; import java.io.IOException; -// TODO once sort refactoring is done this needs to be merged into SortBuilder -public interface SortElementParserTemp { +public interface SortBuilderParser extends NamedWriteable, ToXContent { /** - * Creates a new SortBuilder from the json held by the {@link SortElementParserTemp} + * Creates a new item from the json held by the {@link SortBuilderParser} * in {@link org.elasticsearch.common.xcontent.XContent} format * * @param context @@ -36,5 +36,5 @@ public interface SortElementParserTemp { * call * @return the new item */ - T fromXContent(QueryParseContext context, String elementName) throws IOException; + SortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortBuilders.java b/core/src/main/java/org/elasticsearch/search/sort/SortBuilders.java index f326fee3837..3eae9b8d019 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortBuilders.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortBuilders.java @@ -21,8 +21,7 @@ package org.elasticsearch.search.sort; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.script.Script; - -import java.util.Arrays; +import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; /** * A set of static factory methods for {@link SortBuilder}s. @@ -53,7 +52,7 @@ public class SortBuilders { * @param script The script to use. * @param type The type, can either be "string" or "number". */ - public static ScriptSortBuilder scriptSort(Script script, String type) { + public static ScriptSortBuilder scriptSort(Script script, ScriptSortType type) { return new ScriptSortBuilder(script, type); } @@ -63,12 +62,12 @@ public class SortBuilders { * @param fieldName The geo point like field name. * @param lat Latitude of the point to create the range distance facets from. * @param lon Longitude of the point to create the range distance facets from. - * + * */ public static GeoDistanceSortBuilder geoDistanceSort(String fieldName, double lat, double lon) { return new GeoDistanceSortBuilder(fieldName, lat, lon); } - + /** * Constructs a new distance based sort on a geo point like field. * @@ -87,5 +86,5 @@ public class SortBuilders { */ public static GeoDistanceSortBuilder geoDistanceSort(String fieldName, String ... 
geohashes) { return new GeoDistanceSortBuilder(fieldName, geohashes); - } + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortMode.java b/core/src/main/java/org/elasticsearch/search/sort/SortMode.java new file mode 100644 index 00000000000..2f6ce9401d4 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/sort/SortMode.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.sort; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +/** + * Elasticsearch supports sorting by array or multi-valued fields. The SortMode option controls what array value is picked + * for sorting the document it belongs to. The mode option can have the following values: + *
+ * <ul>
+ * <li>min - Pick the lowest value.</li>
+ * <li>max - Pick the highest value.</li>
+ * <li>sum - Use the sum of all values as sort value. Only applicable for number based array fields.</li>
+ * <li>avg - Use the average of all values as sort value. Only applicable for number based array fields.</li>
+ * <li>median - Use the median of all values as sort value. Only applicable for number based array fields.</li>
+ * </ul>
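+ *
+ * A minimal usage sketch (hypothetical snippet, not part of this change), relying only on the
+ * {@code fromString} and {@code toString} methods defined below:
+ * <pre>
+ * SortMode mode = SortMode.fromString("avg"); // yields SortMode.AVG
+ * assert "avg".equals(mode.toString());
+ * </pre>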
      + */ +public enum SortMode implements Writeable { + /** pick the lowest value **/ + MIN, + /** pick the highest value **/ + MAX, + /** Use the sum of all values as sort value. Only applicable for number based array fields. **/ + SUM, + /** Use the average of all values as sort value. Only applicable for number based array fields. **/ + AVG, + /** Use the median of all values as sort value. Only applicable for number based array fields. **/ + MEDIAN; + + static SortMode PROTOTYPE = MIN; + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeVInt(ordinal()); + } + + @Override + public SortMode readFrom(final StreamInput in) throws IOException { + int ordinal = in.readVInt(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IOException("Unknown SortMode ordinal [" + ordinal + "]"); + } + return values()[ordinal]; + } + + public static SortMode fromString(final String str) { + Objects.requireNonNull(str, "input string is null"); + switch (str.toLowerCase(Locale.ROOT)) { + case ("min"): + return MIN; + case ("max"): + return MAX; + case ("sum"): + return SUM; + case ("avg"): + return AVG; + case ("median"): + return MEDIAN; + default: + throw new IllegalArgumentException("Unknown SortMode [" + str + "]"); + } + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java b/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java index a99158787d3..fe0b62022fe 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.core.LongFieldMapper; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.SearchParseElement; @@ -55,7 +54,6 @@ public class SortParseElement implements SearchParseElement { private static final SortField SORT_DOC = new SortField(null, SortField.Type.DOC); private static final SortField SORT_DOC_REVERSE = new SortField(null, SortField.Type.DOC, true); - public static final ParseField IGNORE_UNMAPPED = new ParseField("ignore_unmapped"); public static final ParseField UNMAPPED_TYPE = new ParseField("unmapped_type"); public static final String SCORE_FIELD_NAME = "_score"; @@ -140,7 +138,7 @@ public class SortParseElement implements SearchParseElement { addSortField(context, sortFields, fieldName, reverse, unmappedType, missing, sortMode, nestedFilterParseHelper); } else { if (PARSERS.containsKey(fieldName)) { - sortFields.add(PARSERS.get(fieldName).parse(parser, context)); + sortFields.add(PARSERS.get(fieldName).parse(parser, context.getQueryShardContext())); } else { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -156,19 +154,13 @@ public class SortParseElement implements SearchParseElement { } } else if ("missing".equals(innerJsonName)) { missing = parser.textOrNull(); - } else if (context.parseFieldMatcher().match(innerJsonName, IGNORE_UNMAPPED)) { - // backward compatibility: 
ignore_unmapped has been replaced with unmapped_type - if (unmappedType == null // don't override if unmapped_type has been provided too - && parser.booleanValue()) { - unmappedType = LongFieldMapper.CONTENT_TYPE; - } } else if (context.parseFieldMatcher().match(innerJsonName, UNMAPPED_TYPE)) { unmappedType = parser.textOrNull(); } else if ("mode".equals(innerJsonName)) { sortMode = MultiValueMode.fromString(parser.text()); } else if ("nested_path".equals(innerJsonName) || "nestedPath".equals(innerJsonName)) { if (nestedFilterParseHelper == null) { - nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context); + nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context.getQueryShardContext()); } nestedFilterParseHelper.setPath(parser.text()); } else { @@ -177,7 +169,7 @@ public class SortParseElement implements SearchParseElement { } else if (token == XContentParser.Token.START_OBJECT) { if ("nested_filter".equals(innerJsonName) || "nestedFilter".equals(innerJsonName)) { if (nestedFilterParseHelper == null) { - nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context); + nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context.getQueryShardContext()); } nestedFilterParseHelper.filter(); } else { @@ -239,14 +231,13 @@ public class SortParseElement implements SearchParseElement { final Nested nested; if (nestedHelper != null && nestedHelper.getPath() != null) { BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()); - Query innerDocumentsFilter; + Query innerDocumentsQuery; if (nestedHelper.filterFound()) { - // TODO: use queries instead - innerDocumentsFilter = nestedHelper.getInnerFilter(); + innerDocumentsQuery = nestedHelper.getInnerFilter(); } else { - innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); + innerDocumentsQuery = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } - nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false)); + nested = new Nested(rootDocumentsFilter, innerDocumentsQuery); } else { nested = null; } diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortParser.java b/core/src/main/java/org/elasticsearch/search/sort/SortParser.java index 6383afd8845..727e576a85e 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortParser.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortParser.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.sort; import org.apache.lucene.search.SortField; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.index.query.QueryShardContext; /** * @@ -30,5 +30,5 @@ public interface SortParser { String[] names(); - SortField parse(XContentParser parser, SearchContext context) throws Exception; + SortField parse(XContentParser parser, QueryShardContext context) throws Exception; } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java b/core/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java index 2b4687c8497..81c73df53fa 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java @@ -25,16 +25,29 @@ import org.apache.lucene.util.automaton.LevenshteinAutomata; public class DirectSpellcheckerSettings { - private 
SuggestMode suggestMode = SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX; - private float accuracy = 0.5f; - private Suggest.Suggestion.Sort sort = Suggest.Suggestion.Sort.SCORE; - private StringDistance stringDistance = DirectSpellChecker.INTERNAL_LEVENSHTEIN; - private int maxEdits = LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE; - private int maxInspections = 5; - private float maxTermFreq = 0.01f; - private int prefixLength = 1; - private int minWordLength = 4; - private float minDocFreq = 0f; + // NB: If this changes, make sure to change the default in TermBuilderSuggester + public static SuggestMode DEFAULT_SUGGEST_MODE = SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX; + public static float DEFAULT_ACCURACY = 0.5f; + public static SortBy DEFAULT_SORT = SortBy.SCORE; + // NB: If this changes, make sure to change the default in TermBuilderSuggester + public static StringDistance DEFAULT_STRING_DISTANCE = DirectSpellChecker.INTERNAL_LEVENSHTEIN; + public static int DEFAULT_MAX_EDITS = LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE; + public static int DEFAULT_MAX_INSPECTIONS = 5; + public static float DEFAULT_MAX_TERM_FREQ = 0.01f; + public static int DEFAULT_PREFIX_LENGTH = 1; + public static int DEFAULT_MIN_WORD_LENGTH = 4; + public static float DEFAULT_MIN_DOC_FREQ = 0f; + + private SuggestMode suggestMode = DEFAULT_SUGGEST_MODE; + private float accuracy = DEFAULT_ACCURACY; + private SortBy sort = DEFAULT_SORT; + private StringDistance stringDistance = DEFAULT_STRING_DISTANCE; + private int maxEdits = DEFAULT_MAX_EDITS; + private int maxInspections = DEFAULT_MAX_INSPECTIONS; + private float maxTermFreq = DEFAULT_MAX_TERM_FREQ; + private int prefixLength = DEFAULT_PREFIX_LENGTH; + private int minWordLength = DEFAULT_MIN_WORD_LENGTH; + private float minDocFreq = DEFAULT_MIN_DOC_FREQ; public SuggestMode suggestMode() { return suggestMode; @@ -52,11 +65,11 @@ public class DirectSpellcheckerSettings { this.accuracy = accuracy; } - public Suggest.Suggestion.Sort sort() { + public SortBy sort() { return sort; } - public void sort(Suggest.Suggestion.Sort sort) { + public void sort(SortBy sort) { this.sort = sort; } @@ -104,8 +117,8 @@ public class DirectSpellcheckerSettings { return minWordLength; } - public void minQueryLength(int minQueryLength) { - this.minWordLength = minQueryLength; + public void minWordLength(int minWordLength) { + this.minWordLength = minWordLength; } public float minDocFreq() { @@ -116,4 +129,20 @@ public class DirectSpellcheckerSettings { this.minDocFreq = minDocFreq; } -} \ No newline at end of file + @Override + public String toString() { + return "[" + + "suggestMode=" + suggestMode + + ",sort=" + sort + + ",stringDistance=" + stringDistance + + ",accuracy=" + accuracy + + ",maxEdits=" + maxEdits + + ",maxInspections=" + maxInspections + + ",maxTermFreq=" + maxTermFreq + + ",prefixLength=" + prefixLength + + ",minWordLength=" + minWordLength + + ",minDocFreq=" + minDocFreq + + "]"; + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SortBy.java b/core/src/main/java/org/elasticsearch/search/suggest/SortBy.java new file mode 100644 index 00000000000..14d46d134de --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/suggest/SortBy.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.suggest; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +/** + * An enum representing the valid sorting options + */ +public enum SortBy implements Writeable { + /** Sort should first be based on score, then document frequency and then the term itself. */ + SCORE, + /** Sort should first be based on document frequency, then score and then the term itself. */ + FREQUENCY; + + public static SortBy PROTOTYPE = SCORE; + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeVInt(ordinal()); + } + + @Override + public SortBy readFrom(final StreamInput in) throws IOException { + int ordinal = in.readVInt(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IOException("Unknown SortBy ordinal [" + ordinal + "]"); + } + return values()[ordinal]; + } + + public static SortBy resolve(final String str) { + Objects.requireNonNull(str, "Input string is null"); + return valueOf(str.toUpperCase(Locale.ROOT)); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java index 5ec92264389..f9c7092fbf1 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -47,9 +46,7 @@ import java.util.Map; */ public class Suggest implements Iterable>>, Streamable, ToXContent { - public static class Fields { - public static final XContentBuilderString SUGGEST = new XContentBuilderString("suggest"); - } + private static final XContentBuilderString NAME = new XContentBuilderString("suggest"); private static final Comparator
    */ @Override - public List parseQueryContext(XContentParser parser) throws IOException, ElasticsearchParseException { - List queryContexts = new ArrayList<>(); - Token token = parser.nextToken(); - if (token == Token.START_OBJECT || token == Token.VALUE_STRING) { - CategoryQueryContext parse = CategoryQueryContext.parse(parser); - queryContexts.add(new QueryContext(parse.getCategory().toString(), parse.getBoost(), parse.isPrefix())); - } else if (token == Token.START_ARRAY) { - while (parser.nextToken() != Token.END_ARRAY) { - CategoryQueryContext parse = CategoryQueryContext.parse(parser); - queryContexts.add(new QueryContext(parse.getCategory().toString(), parse.getBoost(), parse.isPrefix())); - } - } - return queryContexts; + public List toInternalQueryContexts(List queryContexts) { + List internalInternalQueryContexts = new ArrayList<>(queryContexts.size()); + internalInternalQueryContexts.addAll( + queryContexts.stream() + .map(queryContext -> new InternalQueryContext(queryContext.getCategory(), queryContext.getBoost(), queryContext.isPrefix())) + .collect(Collectors.toList())); + return internalInternalQueryContexts; } @Override diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java index c4931265776..a164faff8b1 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java @@ -22,11 +22,11 @@ package org.elasticsearch.search.suggest.completion.context; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.Objects; import static org.elasticsearch.search.suggest.completion.context.CategoryContextMapping.CONTEXT_BOOST; import static org.elasticsearch.search.suggest.completion.context.CategoryContextMapping.CONTEXT_PREFIX; @@ -35,12 +35,15 @@ import static org.elasticsearch.search.suggest.completion.context.CategoryContex /** * Defines the query context for {@link CategoryContextMapping} */ -public final class CategoryQueryContext implements ToXContent { - private final CharSequence category; +public final class CategoryQueryContext implements QueryContext { + public static final String NAME = "category"; + public static final CategoryQueryContext PROTOTYPE = new CategoryQueryContext("", 1, false); + + private final String category; private final boolean isPrefix; private final int boost; - private CategoryQueryContext(CharSequence category, int boost, boolean isPrefix) { + private CategoryQueryContext(String category, int boost, boolean isPrefix) { this.category = category; this.boost = boost; this.isPrefix = isPrefix; @@ -49,7 +52,7 @@ public final class CategoryQueryContext implements ToXContent { /** * Returns the category of the context */ - public CharSequence getCategory() { + public String getCategory() { return category; } @@ -71,54 +74,36 @@ public final class CategoryQueryContext implements ToXContent { return new Builder(); } - public static class Builder { - private CharSequence category; - private boolean isPrefix = false; - private int boost = 1; + @Override + 
public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; - public Builder() { - } + CategoryQueryContext that = (CategoryQueryContext) o; - /** - * Sets the category of the context. - * This is a required field - */ - public Builder setCategory(CharSequence context) { - this.category = context; - return this; - } + if (isPrefix != that.isPrefix) return false; + if (boost != that.boost) return false; + return category != null ? category.equals(that.category) : that.category == null; - /** - * Sets if the context should be treated as a prefix or not. - * Defaults to false - */ - public Builder setPrefix(boolean prefix) { - this.isPrefix = prefix; - return this; - } - - /** - * Sets the query-time boost of the context. - * Defaults to 1. - */ - public Builder setBoost(int boost) { - this.boost = boost; - return this; - } - - public CategoryQueryContext build() { - return new CategoryQueryContext(category, boost, isPrefix); - } } - private static ObjectParser CATEGORY_PARSER = new ObjectParser<>("category", null); + @Override + public int hashCode() { + int result = category != null ? category.hashCode() : 0; + result = 31 * result + (isPrefix ? 1 : 0); + result = 31 * result + boost; + return result; + } + + private static ObjectParser CATEGORY_PARSER = new ObjectParser<>(NAME, null); static { - CATEGORY_PARSER.declareString(Builder::setCategory, new ParseField("context")); - CATEGORY_PARSER.declareInt(Builder::setBoost, new ParseField("boost")); - CATEGORY_PARSER.declareBoolean(Builder::setPrefix, new ParseField("prefix")); + CATEGORY_PARSER.declareString(Builder::setCategory, new ParseField(CONTEXT_VALUE)); + CATEGORY_PARSER.declareInt(Builder::setBoost, new ParseField(CONTEXT_BOOST)); + CATEGORY_PARSER.declareBoolean(Builder::setPrefix, new ParseField(CONTEXT_PREFIX)); } - public static CategoryQueryContext parse(XContentParser parser) throws IOException { + @Override + public CategoryQueryContext fromXContext(XContentParser parser) throws IOException { XContentParser.Token token = parser.currentToken(); Builder builder = builder(); if (token == XContentParser.Token.START_OBJECT) { @@ -140,4 +125,49 @@ public final class CategoryQueryContext implements ToXContent { builder.endObject(); return builder; } + + public static class Builder { + private String category; + private boolean isPrefix = false; + private int boost = 1; + + public Builder() { + } + + /** + * Sets the category of the category. + * This is a required field + */ + public Builder setCategory(String category) { + Objects.requireNonNull(category, "category must not be null"); + this.category = category; + return this; + } + + /** + * Sets if the context should be treated as a prefix or not. + * Defaults to false + */ + public Builder setPrefix(boolean prefix) { + this.isPrefix = prefix; + return this; + } + + /** + * Sets the query-time boost of the context. + * Defaults to 1. 
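+ * A boost of zero or less is rejected with an {@link IllegalArgumentException}.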
+ */ + public Builder setBoost(int boost) { + if (boost <= 0) { + throw new IllegalArgumentException("boost must be greater than 0"); + } + this.boost = boost; + return this; + } + + public CategoryQueryContext build() { + Objects.requireNonNull(category, "category must not be null"); + return new CategoryQueryContext(category, boost, isPrefix); + } + } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java index 42e5cc0a157..959a749a858 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java @@ -23,11 +23,13 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.CompletionFieldMapper; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.Set; @@ -38,7 +40,7 @@ import java.util.Set; * * Implementations have to define how contexts are parsed at query/index time */ -public abstract class ContextMapping implements ToXContent { +public abstract class ContextMapping implements ToXContent { public static final String FIELD_TYPE = "type"; public static final String FIELD_NAME = "name"; @@ -94,10 +96,31 @@ public abstract class ContextMapping implements ToXContent { */ protected abstract Set parseContext(ParseContext.Document document); + /** + * Prototype for the query context + */ + protected abstract T prototype(); + /** * Parses query contexts for this mapper */ - public abstract List parseQueryContext(XContentParser parser) throws IOException, ElasticsearchParseException; + public final List parseQueryContext(XContentParser parser) throws IOException, ElasticsearchParseException { + List queryContexts = new ArrayList<>(); + Token token = parser.nextToken(); + if (token == Token.START_OBJECT || token == Token.VALUE_STRING) { + queryContexts.add((T) prototype().fromXContext(parser)); + } else if (token == Token.START_ARRAY) { + while (parser.nextToken() != Token.END_ARRAY) { + queryContexts.add((T) prototype().fromXContext(parser)); + } + } + return toInternalQueryContexts(queryContexts); + } + + /** + * Convert query contexts to common representation + */ + protected abstract List toInternalQueryContexts(List queryContexts); /** * Implementations should add specific configurations @@ -136,17 +159,38 @@ public abstract class ContextMapping implements ToXContent { } } - public static class QueryContext { + public static class InternalQueryContext { public final String context; public final int boost; public final boolean isPrefix; - public QueryContext(String context, int boost, boolean isPrefix) { + public InternalQueryContext(String context, int boost, boolean isPrefix) { this.context = context; this.boost = boost; this.isPrefix = isPrefix; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + InternalQueryContext that = (InternalQueryContext) o; + + if (boost != 
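With this change the category builder validates eagerly: setBoost() rejects non-positive values and build() requires a category. A minimal usage snippet of the new API (the category string and boost value below are hypothetical, chosen only for illustration):

    import org.elasticsearch.search.suggest.completion.context.CategoryQueryContext;

    // Hypothetical usage: category is required and boost must be positive,
    // so both failure modes now surface when the context is built, not at query time.
    CategoryQueryContext context = CategoryQueryContext.builder()
        .setCategory("coffee_shop")  // hypothetical value; null now throws NullPointerException
        .setBoost(2)                 // boost <= 0 now throws IllegalArgumentException
        .setPrefix(false)
        .build();                    // throws NullPointerException if no category was set
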
that.boost) return false; + if (isPrefix != that.isPrefix) return false; + return context != null ? context.equals(that.context) : that.context == null; + + } + + @Override + public int hashCode() { + int result = context != null ? context.hashCode() : 0; + result = 31 * result + boost; + result = 31 * result + (isPrefix ? 1 : 0); + return result; + } + @Override public String toString() { return "QueryContext{" + diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java index 9d4bed4f664..ff550a8d34e 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java @@ -43,7 +43,6 @@ import java.util.Set; import static org.elasticsearch.search.suggest.completion.context.ContextMapping.FIELD_NAME; import static org.elasticsearch.search.suggest.completion.context.ContextMapping.FIELD_TYPE; -import static org.elasticsearch.search.suggest.completion.context.ContextMapping.QueryContext; import static org.elasticsearch.search.suggest.completion.context.ContextMapping.Type; /** @@ -153,7 +152,7 @@ public class ContextMappings implements ToXContent { * @param queryContexts a map of context mapping name and collected query contexts * @return a context-enabled query */ - public ContextQuery toContextQuery(CompletionQuery query, Map> queryContexts) { + public ContextQuery toContextQuery(CompletionQuery query, Map> queryContexts) { ContextQuery typedContextQuery = new ContextQuery(query); if (queryContexts.isEmpty() == false) { CharsRefBuilder scratch = new CharsRefBuilder(); @@ -162,9 +161,9 @@ public class ContextMappings implements ToXContent { scratch.setCharAt(0, (char) typeId); scratch.setLength(1); ContextMapping mapping = contextMappings.get(typeId); - List queryContext = queryContexts.get(mapping.name()); - if (queryContext != null) { - for (QueryContext context : queryContext) { + List internalQueryContext = queryContexts.get(mapping.name()); + if (internalQueryContext != null) { + for (ContextMapping.InternalQueryContext context : internalQueryContext) { scratch.append(context.context); typedContextQuery.addContext(scratch.toCharsRef(), context.boost, !context.isPrefix); scratch.setLength(1); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java index 4af90ab24a2..41d78e75353 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java @@ -42,6 +42,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.stream.Collectors; import static org.apache.lucene.spatial.util.GeoHashUtils.addNeighbors; import static org.apache.lucene.spatial.util.GeoHashUtils.stringEncode; @@ -56,7 +57,7 @@ import static org.apache.lucene.spatial.util.GeoHashUtils.stringEncode; * {@link GeoQueryContext} defines the options for constructing * a unit of query context for this context type */ -public class GeoContextMapping extends ContextMapping { +public class GeoContextMapping extends ContextMapping { public static final String FIELD_PRECISION = "precision"; public static final String 
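A note on the refactoring above: parseQueryContext(XContentParser) is now final in ContextMapping and drives parsing through prototype().fromXContext(), so a concrete mapping only supplies its prototype and the conversion into InternalQueryContext objects. A partial sketch of a mapping under the new contract (MyContextMapping, MyQueryContext and its accessors are hypothetical names, the generic parameter is assumed to be the mapping's QueryContext subtype, and the remaining abstract methods such as parseContext() are omitted):

    // Hypothetical subclass: no XContent handling here, the base class owns parsing.
    public class MyContextMapping extends ContextMapping<MyQueryContext> {

        @Override
        protected MyQueryContext prototype() {
            return MyQueryContext.PROTOTYPE; // consumed by the final parseQueryContext()
        }

        @Override
        protected List<InternalQueryContext> toInternalQueryContexts(List<MyQueryContext> queryContexts) {
            // Convert each parsed context into the common internal representation.
            return queryContexts.stream()
                .map(qc -> new InternalQueryContext(qc.value(), qc.getBoost(), qc.isPrefix()))
                .collect(Collectors.toList());
        }
    }
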
FIELD_FIELDNAME = "path"; @@ -221,6 +222,11 @@ public class GeoContextMapping extends ContextMapping { return locations; } + @Override + protected GeoQueryContext prototype() { + return GeoQueryContext.PROTOTYPE; + } + /** * Parse a list of {@link GeoQueryContext} * using parser. A QueryContexts accepts one of the following forms: @@ -245,22 +251,10 @@ public class GeoContextMapping extends ContextMapping { * see {@link GeoUtils#parseGeoPoint(String, GeoPoint)} for GEO POINT */ @Override - public List parseQueryContext(XContentParser parser) throws IOException, ElasticsearchParseException { - List queryContexts = new ArrayList<>(); - Token token = parser.nextToken(); - if (token == Token.START_OBJECT || token == Token.VALUE_STRING) { - queryContexts.add(GeoQueryContext.parse(parser)); - } else if (token == Token.START_ARRAY) { - while (parser.nextToken() != Token.END_ARRAY) { - queryContexts.add(GeoQueryContext.parse(parser)); - } - } - List queryContextList = new ArrayList<>(); + public List toInternalQueryContexts(List queryContexts) { + List internalQueryContextList = new ArrayList<>(); for (GeoQueryContext queryContext : queryContexts) { - int minPrecision = this.precision; - if (queryContext.getPrecision() != -1) { - minPrecision = Math.min(minPrecision, queryContext.getPrecision()); - } + int minPrecision = Math.min(this.precision, queryContext.getPrecision()); GeoPoint point = queryContext.getGeoPoint(); final Collection locations = new HashSet<>(); String geoHash = stringEncode(point.getLon(), point.getLat(), minPrecision); @@ -268,19 +262,20 @@ public class GeoContextMapping extends ContextMapping { if (queryContext.getNeighbours().isEmpty() && geoHash.length() == this.precision) { addNeighbors(geoHash, locations); } else if (queryContext.getNeighbours().isEmpty() == false) { - for (Integer neighbourPrecision : queryContext.getNeighbours()) { - if (neighbourPrecision < geoHash.length()) { + queryContext.getNeighbours().stream() + .filter(neighbourPrecision -> neighbourPrecision < geoHash.length()) + .forEach(neighbourPrecision -> { String truncatedGeoHash = geoHash.substring(0, neighbourPrecision); locations.add(truncatedGeoHash); addNeighbors(truncatedGeoHash, locations); - } - } - } - for (String location : locations) { - queryContextList.add(new QueryContext(location, queryContext.getBoost(), location.length() < this.precision)); + }); } + internalQueryContextList.addAll( + locations.stream() + .map(location -> new InternalQueryContext(location, queryContext.getBoost(), location.length() < this.precision)) + .collect(Collectors.toList())); } - return queryContextList; + return internalQueryContextList; } @Override @@ -304,7 +299,7 @@ public class GeoContextMapping extends ContextMapping { private int precision = DEFAULT_PRECISION; private String fieldName = null; - protected Builder(String name) { + public Builder(String name) { super(name); } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java index da9191bf2d5..913702c18d0 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java @@ -24,13 +24,13 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.xcontent.ObjectParser; 
-import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Objects; import static org.elasticsearch.search.suggest.completion.context.GeoContextMapping.CONTEXT_BOOST; import static org.elasticsearch.search.suggest.completion.context.GeoContextMapping.CONTEXT_NEIGHBOURS; @@ -40,7 +40,10 @@ import static org.elasticsearch.search.suggest.completion.context.GeoContextMapp /** * Defines the query context for {@link GeoContextMapping} */ -public final class GeoQueryContext implements ToXContent { +public final class GeoQueryContext implements QueryContext { + public static final String NAME = "geo"; + public static final GeoQueryContext PROTOTYPE = new GeoQueryContext(null, 1, 12, Collections.emptyList()); + private final GeoPoint geoPoint; private final int boost; private final int precision; @@ -81,90 +84,47 @@ public final class GeoQueryContext implements ToXContent { return neighbours; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + GeoQueryContext that = (GeoQueryContext) o; + + if (boost != that.boost) return false; + if (precision != that.precision) return false; + if (geoPoint != null ? !geoPoint.equals(that.geoPoint) : that.geoPoint != null) return false; + return neighbours != null ? neighbours.equals(that.neighbours) : that.neighbours == null; + + } + + @Override + public int hashCode() { + int result = geoPoint != null ? geoPoint.hashCode() : 0; + result = 31 * result + boost; + result = 31 * result + precision; + result = 31 * result + (neighbours != null ? neighbours.hashCode() : 0); + return result; + } + public static Builder builder() { return new Builder(); } - public static class Builder { - private GeoPoint geoPoint; - private int boost = 1; - private int precision = -1; - private List neighbours = Collections.emptyList(); - - public Builder() { - } - - /** - * Sets the query-time boost for the context - * Defaults to 1 - */ - public Builder setBoost(int boost) { - this.boost = boost; - return this; - } - - /** - * Sets the precision level for computing the geohash from the context geo point. - * Defaults to using index-time precision level - */ - public Builder setPrecision(int precision) { - this.precision = precision; - return this; - } - - /** - * Sets the precision levels at which geohash cells neighbours are considered. - * Defaults to only considering neighbours at the index-time precision level - */ - public Builder setNeighbours(List neighbours) { - this.neighbours = neighbours; - return this; - } - - /** - * Sets the geo point of the context. 
- * This is a required field - */ - public Builder setGeoPoint(GeoPoint geoPoint) { - this.geoPoint = geoPoint; - return this; - } - - private double lat = Double.NaN; - void setLat(double lat) { - this.lat = lat; - } - - private double lon = Double.NaN; - void setLon(double lon) { - this.lon = lon; - } - - public GeoQueryContext build() { - if (geoPoint == null) { - if (Double.isNaN(lat) == false && Double.isNaN(lon) == false) { - geoPoint = new GeoPoint(lat, lon); - } else { - throw new IllegalArgumentException("no geohash or geo point provided"); - } - } - return new GeoQueryContext(geoPoint, boost, precision, neighbours); - } - } - - private static ObjectParser GEO_CONTEXT_PARSER = new ObjectParser<>("geo", null); + private static ObjectParser GEO_CONTEXT_PARSER = new ObjectParser<>(NAME, null); static { - GEO_CONTEXT_PARSER.declareField((parser, geoQueryContext, geoContextMapping) -> geoQueryContext.setGeoPoint(GeoUtils.parseGeoPoint(parser)), new ParseField("context"), ObjectParser.ValueType.OBJECT); - GEO_CONTEXT_PARSER.declareInt(GeoQueryContext.Builder::setBoost, new ParseField("boost")); + GEO_CONTEXT_PARSER.declareField((parser, geoQueryContext, geoContextMapping) -> geoQueryContext.setGeoPoint(GeoUtils.parseGeoPoint(parser)), new ParseField(CONTEXT_VALUE), ObjectParser.ValueType.OBJECT); + GEO_CONTEXT_PARSER.declareInt(GeoQueryContext.Builder::setBoost, new ParseField(CONTEXT_BOOST)); // TODO : add string support for precision for GeoUtils.geoHashLevelsForPrecision() - GEO_CONTEXT_PARSER.declareInt(GeoQueryContext.Builder::setPrecision, new ParseField("precision")); + GEO_CONTEXT_PARSER.declareInt(GeoQueryContext.Builder::setPrecision, new ParseField(CONTEXT_PRECISION)); // TODO : add string array support for precision for GeoUtils.geoHashLevelsForPrecision() - GEO_CONTEXT_PARSER.declareIntArray(GeoQueryContext.Builder::setNeighbours, new ParseField("neighbours")); + GEO_CONTEXT_PARSER.declareIntArray(GeoQueryContext.Builder::setNeighbours, new ParseField(CONTEXT_NEIGHBOURS)); GEO_CONTEXT_PARSER.declareDouble(GeoQueryContext.Builder::setLat, new ParseField("lat")); GEO_CONTEXT_PARSER.declareDouble(GeoQueryContext.Builder::setLon, new ParseField("lon")); } - public static GeoQueryContext parse(XContentParser parser) throws IOException { + @Override + public GeoQueryContext fromXContext(XContentParser parser) throws IOException { XContentParser.Token token = parser.currentToken(); GeoQueryContext.Builder builder = new Builder(); if (token == XContentParser.Token.START_OBJECT) { @@ -190,4 +150,82 @@ public final class GeoQueryContext implements ToXContent { builder.endObject(); return builder; } + + public static class Builder { + private GeoPoint geoPoint; + private int boost = 1; + private int precision = 12; + private List neighbours = Collections.emptyList(); + + public Builder() { + } + + /** + * Sets the query-time boost for the context + * Defaults to 1 + */ + public Builder setBoost(int boost) { + if (boost <= 0) { + throw new IllegalArgumentException("boost must be greater than 0"); + } + this.boost = boost; + return this; + } + + /** + * Sets the precision level for computing the geohash from the context geo point. + * Defaults to using index-time precision level + */ + public Builder setPrecision(int precision) { + if (precision < 1 || precision > 12) { + throw new IllegalArgumentException("precision must be between 1 and 12"); + } + this.precision = precision; + return this; + } + + /** + * Sets the precision levels at which geohash cells neighbours are considered. 
+ * Defaults to only considering neighbours at the index-time precision level + */ + public Builder setNeighbours(List neighbours) { + for (int neighbour : neighbours) { + if (neighbour < 1 || neighbour > 12) { + throw new IllegalArgumentException("neighbour value must be between 1 and 12"); + } + } + this.neighbours = neighbours; + return this; + } + + /** + * Sets the geo point of the context. + * This is a required field + */ + public Builder setGeoPoint(GeoPoint geoPoint) { + Objects.requireNonNull(geoPoint, "geoPoint must not be null"); + this.geoPoint = geoPoint; + return this; + } + + private double lat = Double.NaN; + void setLat(double lat) { + this.lat = lat; + } + + private double lon = Double.NaN; + void setLon(double lon) { + this.lon = lon; + } + + public GeoQueryContext build() { + if (geoPoint == null) { + if (Double.isNaN(lat) == false && Double.isNaN(lon) == false) { + geoPoint = new GeoPoint(lat, lon); + } + } + Objects.requireNonNull(geoPoint, "geoPoint must not be null"); + return new GeoQueryContext(geoPoint, boost, precision, neighbours); + } + } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/QueryContext.java similarity index 70% rename from core/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java rename to core/src/main/java/org/elasticsearch/search/suggest/completion/context/QueryContext.java index a7aa3fd60b6..9d96bf81447 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/context/QueryContext.java @@ -16,15 +16,18 @@ * specific language governing permissions and limitations * under the License. 
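As with the category builder, geo query contexts are now validated when they are constructed. A usage snippet of the new checks (the coordinates and levels are hypothetical):

    import java.util.Arrays;
    import org.elasticsearch.common.geo.GeoPoint;
    import org.elasticsearch.search.suggest.completion.context.GeoQueryContext;

    // Hypothetical usage: boost must be positive, precision and every neighbour
    // level must fall in [1, 12], and a geo point (or lat/lon pair) is required.
    GeoQueryContext geoContext = GeoQueryContext.builder()
        .setGeoPoint(new GeoPoint(53.55, 9.99))  // hypothetical coordinates
        .setPrecision(6)                         // values outside 1..12 now throw
        .setNeighbours(Arrays.asList(5, 6))      // each level is checked against 1..12
        .setBoost(3)
        .build();                                // throws NullPointerException without a point
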
*/ -package org.elasticsearch.search.suggest; +package org.elasticsearch.search.suggest.completion.context; + +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.mapper.MapperService; import java.io.IOException; -public interface SuggestContextParser { - SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, IndexFieldDataService indexFieldDataService) throws IOException; +/** + * Interface for serializing/de-serializing completion query context + */ +public interface QueryContext extends ToXContent { + QueryContext fromXContext(XContentParser parser) throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java index 5b937500d6b..a454735ae1c 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java @@ -178,7 +178,7 @@ public final class DirectCandidateGenerator extends CandidateGenerator { protected long thresholdFrequency(long termFrequency, long dictionarySize) { if (termFrequency > 0) { - return (long) Math.max(0, Math.round(termFrequency * (Math.log10(termFrequency - frequencyPlateau) * (1.0 / Math.log10(logBase))) + 1)); + return Math.max(0, Math.round(termFrequency * (Math.log10(termFrequency - frequencyPlateau) * (1.0 / Math.log10(logBase))) + 1)); } return 0; diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java index 90ec2845b8a..f6a94314f5b 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java @@ -25,13 +25,12 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.search.suggest.SortBy; import org.elasticsearch.search.suggest.SuggestUtils; import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.CandidateGenerator; @@ -42,7 +41,7 @@ import java.util.Set; import java.util.function.Consumer; public final class DirectCandidateGeneratorBuilder - implements Writeable, CandidateGenerator { + implements CandidateGenerator { private static final String TYPE = "direct_generator"; static final DirectCandidateGeneratorBuilder PROTOTYPE = new DirectCandidateGeneratorBuilder("_na_"); @@ -350,8 +349,7 @@ public final class DirectCandidateGeneratorBuilder return replaceField(tmpFieldName.iterator().next(), tempGenerator); } - public PhraseSuggestionContext.DirectCandidateGenerator build(QueryShardContext context) throws IOException { - 
MapperService mapperService = context.getMapperService(); + public PhraseSuggestionContext.DirectCandidateGenerator build(MapperService mapperService) throws IOException { PhraseSuggestionContext.DirectCandidateGenerator generator = new PhraseSuggestionContext.DirectCandidateGenerator(); generator.setField(this.field); transferIfNotNull(this.size, generator::size); @@ -372,7 +370,7 @@ public final class DirectCandidateGeneratorBuilder generator.suggestMode(SuggestUtils.resolveSuggestMode(this.suggestMode)); } if (this.sort != null) { - generator.sort(SuggestUtils.resolveSort(this.sort)); + generator.sort(SortBy.resolve(this.sort)); } if (this.stringDistance != null) { generator.stringDistance(SuggestUtils.resolveDistance(this.stringDistance)); @@ -384,7 +382,7 @@ public final class DirectCandidateGeneratorBuilder transferIfNotNull(this.maxInspections, generator::maxInspections); transferIfNotNull(this.maxTermFreq, generator::maxTermFreq); transferIfNotNull(this.prefixLength, generator::prefixLength); - transferIfNotNull(this.minWordLength, generator::minQueryLength); + transferIfNotNull(this.minWordLength, generator::minWordLength); transferIfNotNull(this.minDocFreq, generator::minDocFreq); return generator; } @@ -490,4 +488,4 @@ public final class DirectCandidateGeneratorBuilder Objects.equals(minWordLength, other.minWordLength) && Objects.equals(minDocFreq, other.minDocFreq); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java new file mode 100644 index 00000000000..e11a920f966 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java @@ -0,0 +1,126 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.suggest.phrase; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.Terms; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.search.suggest.phrase.WordScorer.WordScorerFactory; + +import java.io.IOException; +import java.util.Objects; + +/** + * An additive + * smoothing model. + *
+ * See N-Gram Smoothing for details. + *
    + */ +public final class Laplace extends SmoothingModel { + private double alpha = DEFAULT_LAPLACE_ALPHA; + private static final String NAME = "laplace"; + private static final ParseField ALPHA_FIELD = new ParseField("alpha"); + static final ParseField PARSE_FIELD = new ParseField(NAME); + /** + * Default alpha parameter for laplace smoothing + */ + public static final double DEFAULT_LAPLACE_ALPHA = 0.5; + public static final Laplace PROTOTYPE = new Laplace(DEFAULT_LAPLACE_ALPHA); + + /** + * Creates a Laplace smoothing model. + * + */ + public Laplace(double alpha) { + this.alpha = alpha; + } + + /** + * @return the laplace model alpha parameter + */ + public double getAlpha() { + return this.alpha; + } + + @Override + protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(ALPHA_FIELD.getPreferredName(), alpha); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeDouble(alpha); + } + + @Override + public SmoothingModel readFrom(StreamInput in) throws IOException { + return new Laplace(in.readDouble()); + } + + @Override + protected boolean doEquals(SmoothingModel other) { + Laplace otherModel = (Laplace) other; + return Objects.equals(alpha, otherModel.alpha); + } + + @Override + protected final int doHashCode() { + return Objects.hash(alpha); + } + + @Override + public SmoothingModel innerFromXContent(QueryParseContext parseContext) throws IOException { + XContentParser parser = parseContext.parser(); + XContentParser.Token token; + String fieldName = null; + double alpha = DEFAULT_LAPLACE_ALPHA; + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } + if (token.isValue() && parseContext.parseFieldMatcher().match(fieldName, ALPHA_FIELD)) { + alpha = parser.doubleValue(); + } + } + return new Laplace(alpha); + } + + @Override + public WordScorerFactory buildWordScorerFactory() { + return (IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) + -> new LaplaceScorer(reader, terms, field, realWordLikelyhood, separator, alpha); + } +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java new file mode 100644 index 00000000000..b94ea333fdb --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java @@ -0,0 +1,176 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
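For readers new to the smoothing models being promoted to top-level classes here: additive (Laplace) smoothing shifts probability mass toward unseen n-grams by adding alpha to every count. The scorer internals are not part of this diff, but the idea behind the factory above can be sketched with hypothetical counts:

    double alpha = 0.5;            // Laplace.DEFAULT_LAPLACE_ALPHA
    long wordCount = 0;            // hypothetical count of an unseen candidate n-gram
    long contextCount = 1000;      // hypothetical count of its context
    long vocabularySize = 50_000;  // hypothetical number of distinct terms
    double probability = (wordCount + alpha) / (contextCount + alpha * vocabularySize);
    // ~1.9e-5 rather than 0.0, so unseen n-grams are never scored as impossible
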
+ */ + +package org.elasticsearch.search.suggest.phrase; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.Terms; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.search.suggest.phrase.WordScorer.WordScorerFactory; + +import java.io.IOException; +import java.util.Objects; + +/** + * Linear interpolation smoothing model. + *
+ * See N-Gram Smoothing for details. + *
    + */ +public final class LinearInterpolation extends SmoothingModel { + private static final String NAME = "linear"; + public static final LinearInterpolation PROTOTYPE = new LinearInterpolation(0.8, 0.1, 0.1); + private final double trigramLambda; + private final double bigramLambda; + private final double unigramLambda; + static final ParseField PARSE_FIELD = new ParseField(NAME); + private static final ParseField TRIGRAM_FIELD = new ParseField("trigram_lambda"); + private static final ParseField BIGRAM_FIELD = new ParseField("bigram_lambda"); + private static final ParseField UNIGRAM_FIELD = new ParseField("unigram_lambda"); + + /** + * Creates a linear interpolation smoothing model. + * + * Note: the lambdas must sum up to one. + * + * @param trigramLambda + * the trigram lambda + * @param bigramLambda + * the bigram lambda + * @param unigramLambda + * the unigram lambda + */ + public LinearInterpolation(double trigramLambda, double bigramLambda, double unigramLambda) { + double sum = trigramLambda + bigramLambda + unigramLambda; + if (Math.abs(sum - 1.0) > 0.001) { + throw new IllegalArgumentException("linear smoothing lambdas must sum to 1"); + } + this.trigramLambda = trigramLambda; + this.bigramLambda = bigramLambda; + this.unigramLambda = unigramLambda; + } + + public double getTrigramLambda() { + return this.trigramLambda; + } + + public double getBigramLambda() { + return this.bigramLambda; + } + + public double getUnigramLambda() { + return this.unigramLambda; + } + + @Override + protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(TRIGRAM_FIELD.getPreferredName(), trigramLambda); + builder.field(BIGRAM_FIELD.getPreferredName(), bigramLambda); + builder.field(UNIGRAM_FIELD.getPreferredName(), unigramLambda); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeDouble(trigramLambda); + out.writeDouble(bigramLambda); + out.writeDouble(unigramLambda); + } + + @Override + public LinearInterpolation readFrom(StreamInput in) throws IOException { + return new LinearInterpolation(in.readDouble(), in.readDouble(), in.readDouble()); + } + + @Override + protected boolean doEquals(SmoothingModel other) { + final LinearInterpolation otherModel = (LinearInterpolation) other; + return Objects.equals(trigramLambda, otherModel.trigramLambda) && + Objects.equals(bigramLambda, otherModel.bigramLambda) && + Objects.equals(unigramLambda, otherModel.unigramLambda); + } + + @Override + protected final int doHashCode() { + return Objects.hash(trigramLambda, bigramLambda, unigramLambda); + } + + @Override + public LinearInterpolation innerFromXContent(QueryParseContext parseContext) throws IOException { + XContentParser parser = parseContext.parser(); + XContentParser.Token token; + String fieldName = null; + double trigramLambda = 0.0; + double bigramLambda = 0.0; + double unigramLambda = 0.0; + ParseFieldMatcher matcher = parseContext.parseFieldMatcher(); + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token.isValue()) { + if (matcher.match(fieldName, TRIGRAM_FIELD)) { + trigramLambda = parser.doubleValue(); + if (trigramLambda < 0) { + throw new IllegalArgumentException("trigram_lambda must be positive"); + } + } else if (matcher.match(fieldName, BIGRAM_FIELD)) { + bigramLambda = 
parser.doubleValue(); + if (bigramLambda < 0) { + throw new IllegalArgumentException("bigram_lambda must be positive"); + } + } else if (matcher.match(fieldName, UNIGRAM_FIELD)) { + unigramLambda = parser.doubleValue(); + if (unigramLambda < 0) { + throw new IllegalArgumentException("unigram_lambda must be positive"); + } + } else { + throw new IllegalArgumentException( + "suggester[phrase][smoothing][linear] doesn't support field [" + fieldName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), + "[" + NAME + "] unknown token [" + token + "] after [" + fieldName + "]"); + } + } + return new LinearInterpolation(trigramLambda, bigramLambda, unigramLambda); + } + + @Override + public WordScorerFactory buildWordScorerFactory() { + return (IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) -> + new LinearInterpoatingScorer(reader, terms, field, realWordLikelyhood, separator, trigramLambda, bigramLambda, + unigramLambda); + } +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java deleted file mode 100644 index fc60fc6fc80..00000000000 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java +++ /dev/null @@ -1,358 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
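The LinearInterpolation constructor now enforces up front what the old parser only checked at the end, namely that the three lambdas sum to 1 within a tolerance of 0.001. With the PROTOTYPE defaults of 0.8/0.1/0.1 the blended score works out as follows (the probability estimates are hypothetical):

    double pTrigram = 0.02, pBigram = 0.05, pUnigram = 0.001; // hypothetical estimates
    double score = 0.8 * pTrigram + 0.1 * pBigram + 0.1 * pUnigram; // = 0.0211

    // new LinearInterpolation(0.5, 0.3, 0.3) now throws immediately:
    // the lambdas sum to 1.1, which is not within 0.001 of 1.
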
- */ -package org.elasticsearch.search.suggest.phrase; - -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.Terms; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentParser.Token; -import org.elasticsearch.index.analysis.ShingleTokenFilterFactory; -import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.script.CompiledScript; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.Template; -import org.elasticsearch.search.suggest.SuggestContextParser; -import org.elasticsearch.search.suggest.SuggestUtils; -import org.elasticsearch.search.suggest.SuggestionSearchContext; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.Laplace; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.StupidBackoff; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionContext.DirectCandidateGenerator; - -import java.io.IOException; -import java.util.Collections; - -public final class PhraseSuggestParser implements SuggestContextParser { - - private PhraseSuggester suggester; - - public PhraseSuggestParser(PhraseSuggester suggester) { - this.suggester = suggester; - } - - @Override - public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, IndexFieldDataService fieldDataService) throws IOException { - PhraseSuggestionContext suggestion = new PhraseSuggestionContext(suggester); - ParseFieldMatcher parseFieldMatcher = mapperService.getIndexSettings().getParseFieldMatcher(); - XContentParser.Token token; - String fieldName = null; - boolean gramSizeSet = false; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } else if (token.isValue()) { - if (!SuggestUtils.parseSuggestContext(parser, mapperService, fieldName, suggestion, parseFieldMatcher)) { - if ("real_word_error_likelihood".equals(fieldName) || "realWorldErrorLikelihood".equals(fieldName)) { - suggestion.setRealWordErrorLikelihood(parser.floatValue()); - if (suggestion.realworldErrorLikelyhood() <= 0.0) { - throw new IllegalArgumentException("real_word_error_likelihood must be > 0.0"); - } - } else if ("confidence".equals(fieldName)) { - suggestion.setConfidence(parser.floatValue()); - if (suggestion.confidence() < 0.0) { - throw new IllegalArgumentException("confidence must be >= 0.0"); - } - } else if ("separator".equals(fieldName)) { - suggestion.setSeparator(new BytesRef(parser.text())); - } else if ("max_errors".equals(fieldName) || "maxErrors".equals(fieldName)) { - suggestion.setMaxErrors(parser.floatValue()); - if (suggestion.maxErrors() <= 0.0) { - throw new IllegalArgumentException("max_error must be > 0.0"); - } - } else if ("gram_size".equals(fieldName) || "gramSize".equals(fieldName)) { - suggestion.setGramSize(parser.intValue()); - if (suggestion.gramSize() < 1) { - throw new IllegalArgumentException("gram_size must be >= 1"); - } - gramSizeSet = true; - } else if ("force_unigrams".equals(fieldName) || "forceUnigrams".equals(fieldName)) { - suggestion.setRequireUnigram(parser.booleanValue()); - } else if ("token_limit".equals(fieldName) || 
"tokenLimit".equals(fieldName)) { - int tokenLimit = parser.intValue(); - if (tokenLimit <= 0) { - throw new IllegalArgumentException("token_limit must be >= 1"); - } - suggestion.setTokenLimit(tokenLimit); - } else { - throw new IllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]"); - } - } - } else if (token == Token.START_ARRAY) { - if (parseFieldMatcher.match(fieldName, DirectCandidateGeneratorBuilder.DIRECT_GENERATOR_FIELD)) { - // for now we only have a single type of generators - while ((token = parser.nextToken()) == Token.START_OBJECT) { - PhraseSuggestionContext.DirectCandidateGenerator generator = parseCandidateGenerator(parser, mapperService, parseFieldMatcher); - verifyGenerator(generator); - suggestion.addGenerator(generator); - } - } else { - throw new IllegalArgumentException("suggester[phrase] doesn't support array field [" + fieldName + "]"); - } - } else if (token == Token.START_OBJECT) { - if ("smoothing".equals(fieldName)) { - parseSmoothingModel(parser, suggestion, fieldName); - } else if ("highlight".equals(fieldName)) { - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } else if (token.isValue()) { - if ("pre_tag".equals(fieldName) || "preTag".equals(fieldName)) { - suggestion.setPreTag(parser.utf8Bytes()); - } else if ("post_tag".equals(fieldName) || "postTag".equals(fieldName)) { - suggestion.setPostTag(parser.utf8Bytes()); - } else { - throw new IllegalArgumentException( - "suggester[phrase][highlight] doesn't support field [" + fieldName + "]"); - } - } - } - } else if ("collate".equals(fieldName)) { - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } else if ("query".equals(fieldName)) { - if (suggestion.getCollateQueryScript() != null) { - throw new IllegalArgumentException("suggester[phrase][collate] query already set, doesn't support additional [" + fieldName + "]"); - } - Template template = Template.parse(parser, parseFieldMatcher); - CompiledScript compiledScript = suggester.scriptService().compile(template, ScriptContext.Standard.SEARCH, Collections.emptyMap()); - suggestion.setCollateQueryScript(compiledScript); - } else if ("params".equals(fieldName)) { - suggestion.setCollateScriptParams(parser.map()); - } else if ("prune".equals(fieldName)) { - if (parser.isBooleanValue()) { - suggestion.setCollatePrune(parser.booleanValue()); - } else { - throw new IllegalArgumentException("suggester[phrase][collate] prune must be either 'true' or 'false'"); - } - } else { - throw new IllegalArgumentException( - "suggester[phrase][collate] doesn't support field [" + fieldName + "]"); - } - } - } else { - throw new IllegalArgumentException("suggester[phrase] doesn't support array field [" + fieldName + "]"); - } - } else { - throw new IllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]"); - } - } - - if (suggestion.getField() == null) { - throw new IllegalArgumentException("The required field option is missing"); - } - - MappedFieldType fieldType = mapperService.fullName(suggestion.getField()); - if (fieldType == null) { - throw new IllegalArgumentException("No mapping found for field [" + suggestion.getField() + "]"); - } else if (suggestion.getAnalyzer() == null) { - // no analyzer name passed in, so try the field's analyzer, or the default analyzer - if 
(fieldType.searchAnalyzer() == null) { - suggestion.setAnalyzer(mapperService.searchAnalyzer()); - } else { - suggestion.setAnalyzer(fieldType.searchAnalyzer()); - } - } - - if (suggestion.model() == null) { - suggestion.setModel(StupidBackoffScorer.FACTORY); - } - - if (!gramSizeSet || suggestion.generators().isEmpty()) { - final ShingleTokenFilterFactory.Factory shingleFilterFactory = SuggestUtils.getShingleFilterFactory(suggestion.getAnalyzer()); - if (!gramSizeSet) { - // try to detect the shingle size - if (shingleFilterFactory != null) { - suggestion.setGramSize(shingleFilterFactory.getMaxShingleSize()); - if (suggestion.getAnalyzer() == null && shingleFilterFactory.getMinShingleSize() > 1 && !shingleFilterFactory.getOutputUnigrams()) { - throw new IllegalArgumentException("The default analyzer for field: [" + suggestion.getField() + "] doesn't emit unigrams. If this is intentional try to set the analyzer explicitly"); - } - } - } - if (suggestion.generators().isEmpty()) { - if (shingleFilterFactory != null && shingleFilterFactory.getMinShingleSize() > 1 && !shingleFilterFactory.getOutputUnigrams() && suggestion.getRequireUnigram()) { - throw new IllegalArgumentException("The default candidate generator for phrase suggest can't operate on field: [" + suggestion.getField() + "] since it doesn't emit unigrams. If this is intentional try to set the candidate generator field explicitly"); - } - // use a default generator on the same field - DirectCandidateGenerator generator = new DirectCandidateGenerator(); - generator.setField(suggestion.getField()); - suggestion.addGenerator(generator); - } - } - - - - return suggestion; - } - - public void parseSmoothingModel(XContentParser parser, PhraseSuggestionContext suggestion, String fieldName) throws IOException { - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - if ("linear".equals(fieldName)) { - ensureNoSmoothing(suggestion); - final double[] lambdas = new double[3]; - while ((token = parser.nextToken()) != Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } - if (token.isValue()) { - if ("trigram_lambda".equals(fieldName) || "trigramLambda".equals(fieldName)) { - lambdas[0] = parser.doubleValue(); - if (lambdas[0] < 0) { - throw new IllegalArgumentException("trigram_lambda must be positive"); - } - } else if ("bigram_lambda".equals(fieldName) || "bigramLambda".equals(fieldName)) { - lambdas[1] = parser.doubleValue(); - if (lambdas[1] < 0) { - throw new IllegalArgumentException("bigram_lambda must be positive"); - } - } else if ("unigram_lambda".equals(fieldName) || "unigramLambda".equals(fieldName)) { - lambdas[2] = parser.doubleValue(); - if (lambdas[2] < 0) { - throw new IllegalArgumentException("unigram_lambda must be positive"); - } - } else { - throw new IllegalArgumentException( - "suggester[phrase][smoothing][linear] doesn't support field [" + fieldName + "]"); - } - } - } - double sum = 0.0d; - for (int i = 0; i < lambdas.length; i++) { - sum += lambdas[i]; - } - if (Math.abs(sum - 1.0) > 0.001) { - throw new IllegalArgumentException("linear smoothing lambdas must sum to 1"); - } - suggestion.setModel(new WordScorer.WordScorerFactory() { - @Override - public WordScorer newScorer(IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) - throws IOException { - return new 
LinearInterpoatingScorer(reader, terms, field, realWordLikelyhood, separator, lambdas[0], lambdas[1], - lambdas[2]); - } - }); - } else if ("laplace".equals(fieldName)) { - ensureNoSmoothing(suggestion); - double theAlpha = Laplace.DEFAULT_LAPLACE_ALPHA; - - while ((token = parser.nextToken()) != Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } - if (token.isValue() && "alpha".equals(fieldName)) { - theAlpha = parser.doubleValue(); - } - } - final double alpha = theAlpha; - suggestion.setModel(new WordScorer.WordScorerFactory() { - @Override - public WordScorer newScorer(IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) - throws IOException { - return new LaplaceScorer(reader, terms, field, realWordLikelyhood, separator, alpha); - } - }); - - } else if ("stupid_backoff".equals(fieldName) || "stupidBackoff".equals(fieldName)) { - ensureNoSmoothing(suggestion); - double theDiscount = StupidBackoff.DEFAULT_BACKOFF_DISCOUNT; - while ((token = parser.nextToken()) != Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } - if (token.isValue() && "discount".equals(fieldName)) { - theDiscount = parser.doubleValue(); - } - } - final double discount = theDiscount; - suggestion.setModel(new WordScorer.WordScorerFactory() { - @Override - public WordScorer newScorer(IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) - throws IOException { - return new StupidBackoffScorer(reader, terms, field, realWordLikelyhood, separator, discount); - } - }); - - } else { - throw new IllegalArgumentException("suggester[phrase] doesn't support object field [" + fieldName + "]"); - } - } - } - } - - private void ensureNoSmoothing(PhraseSuggestionContext suggestion) { - if (suggestion.model() != null) { - throw new IllegalArgumentException("only one smoothing model supported"); - } - } - - private void verifyGenerator(PhraseSuggestionContext.DirectCandidateGenerator suggestion) { - // Verify options and set defaults - if (suggestion.field() == null) { - throw new IllegalArgumentException("The required field option is missing"); - } - } - - static PhraseSuggestionContext.DirectCandidateGenerator parseCandidateGenerator(XContentParser parser, MapperService mapperService, - ParseFieldMatcher parseFieldMatcher) throws IOException { - XContentParser.Token token; - String fieldName = null; - PhraseSuggestionContext.DirectCandidateGenerator generator = new PhraseSuggestionContext.DirectCandidateGenerator(); - while ((token = parser.nextToken()) != Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } - if (token.isValue()) { - if (!SuggestUtils.parseDirectSpellcheckerSettings(parser, fieldName, generator, parseFieldMatcher)) { - if ("field".equals(fieldName)) { - generator.setField(parser.text()); - if (mapperService.fullName(generator.field()) == null) { - throw new IllegalArgumentException("No mapping found for field [" + generator.field() + "]"); - } - } else if ("size".equals(fieldName)) { - generator.size(parser.intValue()); - } else if ("pre_filter".equals(fieldName) || "preFilter".equals(fieldName)) { - String analyzerName = parser.text(); - Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName); - if (analyzer == null) { - throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists"); - } - generator.preFilter(analyzer); - 
} else if ("post_filter".equals(fieldName) || "postFilter".equals(fieldName)) { - String analyzerName = parser.text(); - Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName); - if (analyzer == null) { - throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists"); - } - generator.postFilter(analyzer); - } else { - throw new IllegalArgumentException("CandidateGenerator doesn't support [" + fieldName + "]"); - } - } - } - } - return generator; - } -} diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java index 7838eacd960..14bced639f2 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java @@ -31,18 +31,17 @@ import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.text.Text; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.suggest.Suggest.Suggestion; import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry; import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option; -import org.elasticsearch.search.suggest.SuggestContextParser; import org.elasticsearch.search.suggest.SuggestUtils; import org.elasticsearch.search.suggest.Suggester; +import org.elasticsearch.search.suggest.SuggestionBuilder; +import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; import org.elasticsearch.search.suggest.phrase.NoisyChannelSpellChecker.Result; import java.io.IOException; @@ -53,13 +52,8 @@ import java.util.Map; public final class PhraseSuggester extends Suggester { private final BytesRef SEPARATOR = new BytesRef(" "); private static final String SUGGESTION_TEMPLATE_VAR_NAME = "suggestion"; - private final ScriptService scriptService; - private final IndicesService indicesService; - public PhraseSuggester(ScriptService scriptService, IndicesService indicesService) { - this.scriptService = scriptService; - this.indicesService = indicesService; - } + public static final PhraseSuggester PROTOTYPE = new PhraseSuggester(); /* * More Ideas: @@ -70,8 +64,8 @@ public final class PhraseSuggester extends Suggester { * - phonetic filters could be interesting here too for candidate selection */ @Override - public Suggestion> innerExecute(String name, PhraseSuggestionContext suggestion, IndexSearcher searcher, - CharsRefBuilder spare) throws IOException { + public Suggestion> innerExecute(String name, PhraseSuggestionContext suggestion, + IndexSearcher searcher, CharsRefBuilder spare) throws IOException { double realWordErrorLikelihood = suggestion.realworldErrorLikelyhood(); final PhraseSuggestion response = new PhraseSuggestion(name, suggestion.getSize()); final IndexReader indexReader = searcher.getIndexReader(); @@ -90,14 +84,16 @@ public final class PhraseSuggester extends Suggester { final String suggestField = suggestion.getField(); final Terms suggestTerms = MultiFields.getTerms(indexReader, suggestField); if (gens.size() > 0 && suggestTerms != null) { - final NoisyChannelSpellChecker checker = 
new NoisyChannelSpellChecker(realWordErrorLikelihood, suggestion.getRequireUnigram(), suggestion.getTokenLimit()); + final NoisyChannelSpellChecker checker = new NoisyChannelSpellChecker(realWordErrorLikelihood, suggestion.getRequireUnigram(), + suggestion.getTokenLimit()); final BytesRef separator = suggestion.separator(); - WordScorer wordScorer = suggestion.model().newScorer(indexReader, suggestTerms, suggestField, realWordErrorLikelihood, separator); + WordScorer wordScorer = suggestion.model().newScorer(indexReader, suggestTerms, suggestField, realWordErrorLikelihood, + separator); Result checkerResult; try (TokenStream stream = checker.tokenStream(suggestion.getAnalyzer(), suggestion.getText(), spare, suggestion.getField())) { - checkerResult = checker.getCorrections(stream, new MultiCandidateGeneratorWrapper(suggestion.getShardSize(), - gens.toArray(new CandidateGenerator[gens.size()])), suggestion.maxErrors(), - suggestion.getShardSize(), wordScorer, suggestion.confidence(), suggestion.gramSize()); + checkerResult = checker.getCorrections(stream, + new MultiCandidateGeneratorWrapper(suggestion.getShardSize(), gens.toArray(new CandidateGenerator[gens.size()])), + suggestion.maxErrors(), suggestion.getShardSize(), wordScorer, suggestion.confidence(), suggestion.gramSize()); } PhraseSuggestion.Entry resultEntry = buildResultEntry(suggestion, spare, checkerResult.cutoffScore); @@ -115,10 +111,10 @@ public final class PhraseSuggester extends Suggester { // from the index for a correction, collateMatch is updated final Map vars = suggestion.getCollateScriptParams(); vars.put(SUGGESTION_TEMPLATE_VAR_NAME, spare.toString()); + ScriptService scriptService = suggestion.getShardContext().getScriptService(); final ExecutableScript executable = scriptService.executable(collateScript, vars); final BytesReference querySource = (BytesReference) executable.run(); - IndexService indexService = indicesService.indexService(suggestion.getIndex()); - final ParsedQuery parsedQuery = indexService.newQueryShardContext().parse(querySource); + final ParsedQuery parsedQuery = suggestion.getShardContext().parse(querySource); collateMatch = Lucene.exists(searcher, parsedQuery.query()); } if (!collateMatch && !collatePrune) { @@ -142,18 +138,14 @@ public final class PhraseSuggester extends Suggester { return response; } - private PhraseSuggestion.Entry buildResultEntry(PhraseSuggestionContext suggestion, CharsRefBuilder spare, double cutoffScore) { + private PhraseSuggestion.Entry buildResultEntry(SuggestionContext suggestion, CharsRefBuilder spare, double cutoffScore) { spare.copyUTF8Bytes(suggestion.getText()); return new PhraseSuggestion.Entry(new Text(spare.toString()), 0, spare.length(), cutoffScore); } - ScriptService scriptService() { - return scriptService; - } - @Override - public SuggestContextParser getContextParser() { - return new PhraseSuggestParser(this); + public SuggestionBuilder getBuilderPrototype() { + return PhraseSuggestionBuilder.PROTOTYPE; } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index 09b4012d5b7..a4793dfbdaa 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -18,26 +18,34 @@ */ package org.elasticsearch.search.suggest.phrase; -import org.apache.lucene.index.IndexReader; -import 
org.apache.lucene.index.Terms; -import org.apache.lucene.util.BytesRef; + +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.index.analysis.ShingleTokenFilterFactory; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.script.CompiledScript; +import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.Template; -import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder; -import org.elasticsearch.search.suggest.phrase.WordScorer.WordScorerFactory; +import org.elasticsearch.search.suggest.SuggestUtils; +import org.elasticsearch.search.suggest.SuggestionBuilder; +import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; +import org.elasticsearch.search.suggest.phrase.PhraseSuggestionContext.DirectCandidateGenerator; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -48,24 +56,69 @@ import java.util.Set; /** * Defines the actual suggest command for phrase suggestions ( phrase). 
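What follows replaces the name-based builder with a field-based one and makes it a first-class Writeable. The pattern, repeated across all suggesters in this change, is a stateless PROTOTYPE whose readFrom produces a fresh instance from the stream, so a suggestion is parsed once on the coordinating node and shipped to the shards in binary form. A minimal, self-contained sketch of that round trip; the class name and the plain java.io stream types below are illustrative stand-ins, not the Elasticsearch stream API:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Stand-in for the prototype/Writeable pattern: a singleton PROTOTYPE is
    // registered once, and deserialization delegates to readFrom(), which
    // returns a brand-new, fully populated instance.
    final class PhraseBuilderSketch {
        static final PhraseBuilderSketch PROTOTYPE = new PhraseBuilderSketch("_na_", 1.0f);

        final String field;      // the field the suggestion runs on
        final float maxErrors;   // mirrors the real builder's max_errors option

        PhraseBuilderSketch(String field, float maxErrors) {
            this.field = field;
            this.maxErrors = maxErrors;
        }

        void writeTo(DataOutputStream out) throws IOException {
            out.writeUTF(field);
            out.writeFloat(maxErrors);
        }

        PhraseBuilderSketch readFrom(DataInputStream in) throws IOException {
            return new PhraseBuilderSketch(in.readUTF(), in.readFloat());
        }

        // Round trip: serialize any instance, read it back through the prototype.
        static PhraseBuilderSketch roundTrip(PhraseBuilderSketch b) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            b.writeTo(new DataOutputStream(bytes));
            return PROTOTYPE.readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        }
    }

The equals/hashCode contracts added further down exist so exactly this kind of round trip can be asserted to reproduce an equal, but distinct, instance.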
*/ -public final class PhraseSuggestionBuilder extends SuggestionBuilder { - private Float maxErrors; - private String separator; - private Float realWordErrorLikelihood; - private Float confidence; - private final Map> generators = new HashMap<>(); +public class PhraseSuggestionBuilder extends SuggestionBuilder { + + private static final String SUGGESTION_NAME = "phrase"; + + public static final PhraseSuggestionBuilder PROTOTYPE = new PhraseSuggestionBuilder("_na_"); + + protected static final ParseField MAXERRORS_FIELD = new ParseField("max_errors"); + protected static final ParseField RWE_LIKELIHOOD_FIELD = new ParseField("real_word_error_likelihood"); + protected static final ParseField SEPARATOR_FIELD = new ParseField("separator"); + protected static final ParseField CONFIDENCE_FIELD = new ParseField("confidence"); + protected static final ParseField GENERATORS_FIELD = new ParseField("shard_size"); + protected static final ParseField GRAMSIZE_FIELD = new ParseField("gram_size"); + protected static final ParseField SMOOTHING_MODEL_FIELD = new ParseField("smoothing"); + protected static final ParseField FORCE_UNIGRAM_FIELD = new ParseField("force_unigrams"); + protected static final ParseField TOKEN_LIMIT_FIELD = new ParseField("token_limit"); + protected static final ParseField HIGHLIGHT_FIELD = new ParseField("highlight"); + protected static final ParseField PRE_TAG_FIELD = new ParseField("pre_tag"); + protected static final ParseField POST_TAG_FIELD = new ParseField("post_tag"); + protected static final ParseField COLLATE_FIELD = new ParseField("collate"); + protected static final ParseField COLLATE_QUERY_FIELD = new ParseField("query"); + protected static final ParseField COLLATE_QUERY_PARAMS = new ParseField("params"); + protected static final ParseField COLLATE_QUERY_PRUNE = new ParseField("prune"); + + private float maxErrors = PhraseSuggestionContext.DEFAULT_MAX_ERRORS; + private String separator = PhraseSuggestionContext.DEFAULT_SEPARATOR; + private float realWordErrorLikelihood = PhraseSuggestionContext.DEFAULT_RWE_ERRORLIKELIHOOD; + private float confidence = PhraseSuggestionContext.DEFAULT_CONFIDENCE; + // gramSize needs to be optional although there is a default; if unset, the parser tries to detect and use the shingle size private Integer gramSize; - private SmoothingModel model; - private Boolean forceUnigrams; - private Integer tokenLimit; + private boolean forceUnigrams = PhraseSuggestionContext.DEFAULT_REQUIRE_UNIGRAM; + private int tokenLimit = NoisyChannelSpellChecker.DEFAULT_TOKEN_LIMIT; private String preTag; private String postTag; private Template collateQuery; private Map collateParams; - private Boolean collatePrune; + private boolean collatePrune = PhraseSuggestionContext.DEFAULT_COLLATE_PRUNE; + private SmoothingModel model; + private final Map> generators = new HashMap<>(); - public PhraseSuggestionBuilder(String name) { - super(name, "phrase"); + public PhraseSuggestionBuilder(String field) { + super(field); + } + + /** + * internal copy constructor that copies over all class fields except for the field name, which is + * set to the one provided in the first argument + */ + private PhraseSuggestionBuilder(String fieldname, PhraseSuggestionBuilder in) { + super(fieldname, in); + maxErrors = in.maxErrors; + separator = in.separator; + realWordErrorLikelihood = in.realWordErrorLikelihood; + confidence = in.confidence; + gramSize = in.gramSize; + forceUnigrams = in.forceUnigrams; + tokenLimit = in.tokenLimit; + preTag = in.preTag; + postTag = in.postTag; + collateQuery =
in.collateQuery; + collateParams = in.collateParams; + collatePrune = in.collatePrune; + model = in.model; + generators.putAll(in.generators); } /** @@ -81,6 +134,13 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder1.0 which means that only * corrections with at most 1 misspelled term are returned. */ - public PhraseSuggestionBuilder maxErrors(Float maxErrors) { + public PhraseSuggestionBuilder maxErrors(float maxErrors) { + if (maxErrors <= 0.0) { + throw new IllegalArgumentException("max_errors must be > 0.0"); + } this.maxErrors = maxErrors; return this; } + /** + * get the maxErrors setting + */ + public Float maxErrors() { + return this.maxErrors; + } + /** * Sets the separator that is used to separate terms in the bigram field. If * not set the whitespace character is used as a separator. */ public PhraseSuggestionBuilder separator(String separator) { + Objects.requireNonNull(separator, "separator cannot be set to null"); this.separator = separator; return this; } + /** + * get the separator that is used to separate terms in the bigram field. + */ + public String separator() { + return this.separator; + } + /** * Sets the likelihood of a term being misspelled even if the term exists * in the dictionary. The default is 0.95, corresponding to 5% of * the real words being misspelled. */ - public PhraseSuggestionBuilder realWordErrorLikelihood(Float realWordErrorLikelihood) { + public PhraseSuggestionBuilder realWordErrorLikelihood(float realWordErrorLikelihood) { + if (realWordErrorLikelihood <= 0.0) { + throw new IllegalArgumentException("real_word_error_likelihood must be > 0.0"); + } this.realWordErrorLikelihood = realWordErrorLikelihood; return this; } + /** + * get the {@link #realWordErrorLikelihood(float)} parameter + */ + public Float realWordErrorLikelihood() { + return this.realWordErrorLikelihood; + } + /** * Sets the confidence level for this suggester. The confidence level * defines a factor applied to the input phrases score which is used as a @@ -123,11 +211,21 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder0.0 the top N candidates * are returned. The default is 1.0 */ - public PhraseSuggestionBuilder confidence(Float confidence) { + public PhraseSuggestionBuilder confidence(float confidence) { + if (confidence < 0.0) { + throw new IllegalArgumentException("confidence must be >= 0.0"); + } this.confidence = confidence; return this; } + /** + * get the {@link #confidence()} parameter + */ + public Float confidence() { + return this.confidence; + } + /** * Adds a {@link CandidateGenerator} to this suggester. The * {@link CandidateGenerator} is used to draw candidates for each individual @@ -160,20 +258,44 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder= 1"); + } this.tokenLimit = tokenLimit; return this; } + /** + * get the {@link #tokenLimit(int)} parameter + */ + public Integer tokenLimit() { + return this.tokenLimit; + } + /** * Sets up highlighting for suggestions. If this is called, a highlight field * is returned with suggestions wrapping changed tokens with preTag and postTag.
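Taken together, the setters above give the builder a fail-fast fluent API: every setter validates its argument immediately and returns this, so bad values surface on the coordinating node instead of later on a shard. A short usage sketch, assuming only the constructor and setters visible in this diff; the field name "body" and the concrete values are illustrative:

    // Hypothetical usage of the validating fluent setters; "body" is a stand-in field name.
    PhraseSuggestionBuilder suggestion = new PhraseSuggestionBuilder("body")
            .maxErrors(0.5f)                  // values <= 0.0 throw IllegalArgumentException
            .realWordErrorLikelihood(0.95f)   // values <= 0.0 throw IllegalArgumentException
            .confidence(1.0f)                 // values < 0.0 throw IllegalArgumentException
            .separator(" ")                   // null is rejected via Objects.requireNonNull
            .tokenLimit(10);                  // values below 1 are rejected

The matching getters (maxErrors(), separator(), realWordErrorLikelihood(), and so on) expose the same state for the equals/hashCode and serialization round trips introduced elsewhere in this change.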
@@ -187,6 +309,20 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder collateParams) { - this.collateParams = collateParams; + Objects.requireNonNull(collateParams, "collate parameters cannot be null."); + this.collateParams = new HashMap<>(collateParams); return this; } + /** + * gets additional params for collate script + */ + public Map collateParams() { + return this.collateParams; + } + /** * Sets whether to prune suggestions after collation */ @@ -219,29 +371,24 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder>> entrySet = generators.entrySet(); for (Entry> entry : entrySet) { @@ -253,412 +400,331 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder Katz's - * Backoff. This model is used as the default if no model is configured. - *

- * See N-Gram Smoothing for details.
    - */ - public static final class StupidBackoff extends SmoothingModel { - /** - * Default discount parameter for {@link StupidBackoff} smoothing - */ - public static final double DEFAULT_BACKOFF_DISCOUNT = 0.4; - private double discount = DEFAULT_BACKOFF_DISCOUNT; - static final StupidBackoff PROTOTYPE = new StupidBackoff(DEFAULT_BACKOFF_DISCOUNT); - private static final String NAME = "stupid_backoff"; - private static final ParseField DISCOUNT_FIELD = new ParseField("discount"); - - /** - * Creates a Stupid-Backoff smoothing model. - * - * @param discount - * the discount given to lower order ngrams if the higher order ngram doesn't exits - */ - public StupidBackoff(double discount) { - this.discount = discount; - } - - /** - * @return the discount parameter of the model - */ - public double getDiscount() { - return this.discount; - } - - @Override - protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(DISCOUNT_FIELD.getPreferredName(), discount); - return builder; - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeDouble(discount); - } - - @Override - public StupidBackoff readFrom(StreamInput in) throws IOException { - return new StupidBackoff(in.readDouble()); - } - - @Override - protected boolean doEquals(SmoothingModel other) { - StupidBackoff otherModel = (StupidBackoff) other; - return Objects.equals(discount, otherModel.discount); - } - - @Override - protected final int doHashCode() { - return Objects.hash(discount); - } - - @Override - public SmoothingModel fromXContent(QueryParseContext parseContext) throws IOException { - XContentParser parser = parseContext.parser(); - XContentParser.Token token; - String fieldName = null; - double discount = DEFAULT_BACKOFF_DISCOUNT; - while ((token = parser.nextToken()) != Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); + @Override + protected PhraseSuggestionBuilder innerFromXContent(QueryParseContext parseContext) throws IOException { + XContentParser parser = parseContext.parser(); + PhraseSuggestionBuilder tmpSuggestion = new PhraseSuggestionBuilder("_na_"); + ParseFieldMatcher parseFieldMatcher = parseContext.parseFieldMatcher(); + XContentParser.Token token; + String currentFieldName = null; + String fieldname = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.ANALYZER_FIELD)) { + tmpSuggestion.analyzer(parser.text()); + } else if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.FIELDNAME_FIELD)) { + fieldname = parser.text(); + } else if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.SIZE_FIELD)) { + tmpSuggestion.size(parser.intValue()); + } else if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.SHARDSIZE_FIELD)) { + tmpSuggestion.shardSize(parser.intValue()); + } else if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.RWE_LIKELIHOOD_FIELD)) { + tmpSuggestion.realWordErrorLikelihood(parser.floatValue()); + } else if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.CONFIDENCE_FIELD)) { + tmpSuggestion.confidence(parser.floatValue()); + } else if (parseFieldMatcher.match(currentFieldName, 
PhraseSuggestionBuilder.SEPARATOR_FIELD)) { + tmpSuggestion.separator(parser.text()); + } else if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.MAXERRORS_FIELD)) { + tmpSuggestion.maxErrors(parser.floatValue()); + } else if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.GRAMSIZE_FIELD)) { + tmpSuggestion.gramSize(parser.intValue()); + } else if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.FORCE_UNIGRAM_FIELD)) { + tmpSuggestion.forceUnigrams(parser.booleanValue()); + } else if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.TOKEN_LIMIT_FIELD)) { + tmpSuggestion.tokenLimit(parser.intValue()); + } else { + throw new ParsingException(parser.getTokenLocation(), + "suggester[phrase] doesn't support field [" + currentFieldName + "]"); } - if (token.isValue() && parseContext.parseFieldMatcher().match(fieldName, DISCOUNT_FIELD)) { - discount = parser.doubleValue(); - } - } - return new StupidBackoff(discount); - } - - @Override - public WordScorerFactory buildWordScorerFactory() { - return (IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) - -> new StupidBackoffScorer(reader, terms, field, realWordLikelyhood, separator, discount); - } - } - - /** - * An additive - * smoothing model. - *

- * See N-Gram Smoothing for details.
    - */ - public static final class Laplace extends SmoothingModel { - private double alpha = DEFAULT_LAPLACE_ALPHA; - private static final String NAME = "laplace"; - private static final ParseField ALPHA_FIELD = new ParseField("alpha"); - /** - * Default alpha parameter for laplace smoothing - */ - public static final double DEFAULT_LAPLACE_ALPHA = 0.5; - static final Laplace PROTOTYPE = new Laplace(DEFAULT_LAPLACE_ALPHA); - - /** - * Creates a Laplace smoothing model. - * - */ - public Laplace(double alpha) { - this.alpha = alpha; - } - - /** - * @return the laplace model alpha parameter - */ - public double getAlpha() { - return this.alpha; - } - - @Override - protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(ALPHA_FIELD.getPreferredName(), alpha); - return builder; - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeDouble(alpha); - } - - @Override - public SmoothingModel readFrom(StreamInput in) throws IOException { - return new Laplace(in.readDouble()); - } - - @Override - protected boolean doEquals(SmoothingModel other) { - Laplace otherModel = (Laplace) other; - return Objects.equals(alpha, otherModel.alpha); - } - - @Override - protected final int doHashCode() { - return Objects.hash(alpha); - } - - @Override - public SmoothingModel fromXContent(QueryParseContext parseContext) throws IOException { - XContentParser parser = parseContext.parser(); - XContentParser.Token token; - String fieldName = null; - double alpha = DEFAULT_LAPLACE_ALPHA; - while ((token = parser.nextToken()) != Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } - if (token.isValue() && parseContext.parseFieldMatcher().match(fieldName, ALPHA_FIELD)) { - alpha = parser.doubleValue(); - } - } - return new Laplace(alpha); - } - - @Override - public WordScorerFactory buildWordScorerFactory() { - return (IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) - -> new LaplaceScorer(reader, terms, field, realWordLikelyhood, separator, alpha); - } - } - - - public static abstract class SmoothingModel implements NamedWriteable, ToXContent { - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(getWriteableName()); - innerToXContent(builder,params); - builder.endObject(); - return builder; - } - - @Override - public final boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null || getClass() != obj.getClass()) { - return false; - } - SmoothingModel other = (SmoothingModel) obj; - return doEquals(other); - } - - @Override - public final int hashCode() { - /* - * Override hashCode here and forward to an abstract method to force extensions of this class to override hashCode in the same - * way that we force them to override equals. This also prevents false positives in CheckStyle's EqualsHashCode check. - */ - return doHashCode(); - } - - public abstract SmoothingModel fromXContent(QueryParseContext parseContext) throws IOException; - - public abstract WordScorerFactory buildWordScorerFactory(); - - /** - * subtype specific implementation of "equals". 
- */ - protected abstract boolean doEquals(SmoothingModel other); - - protected abstract int doHashCode(); - - protected abstract XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException; - } - - /** - * Linear interpolation smoothing model. - *

- * See N-Gram Smoothing for details.
    - */ - public static final class LinearInterpolation extends SmoothingModel { - private static final String NAME = "linear"; - static final LinearInterpolation PROTOTYPE = new LinearInterpolation(0.8, 0.1, 0.1); - private final double trigramLambda; - private final double bigramLambda; - private final double unigramLambda; - private static final ParseField TRIGRAM_FIELD = new ParseField("trigram_lambda"); - private static final ParseField BIGRAM_FIELD = new ParseField("bigram_lambda"); - private static final ParseField UNIGRAM_FIELD = new ParseField("unigram_lambda"); - - /** - * Creates a linear interpolation smoothing model. - * - * Note: the lambdas must sum up to one. - * - * @param trigramLambda - * the trigram lambda - * @param bigramLambda - * the bigram lambda - * @param unigramLambda - * the unigram lambda - */ - public LinearInterpolation(double trigramLambda, double bigramLambda, double unigramLambda) { - double sum = trigramLambda + bigramLambda + unigramLambda; - if (Math.abs(sum - 1.0) > 0.001) { - throw new IllegalArgumentException("linear smoothing lambdas must sum to 1"); - } - this.trigramLambda = trigramLambda; - this.bigramLambda = bigramLambda; - this.unigramLambda = unigramLambda; - } - - public double getTrigramLambda() { - return this.trigramLambda; - } - - public double getBigramLambda() { - return this.bigramLambda; - } - - public double getUnigramLambda() { - return this.unigramLambda; - } - - @Override - protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(TRIGRAM_FIELD.getPreferredName(), trigramLambda); - builder.field(BIGRAM_FIELD.getPreferredName(), bigramLambda); - builder.field(UNIGRAM_FIELD.getPreferredName(), unigramLambda); - return builder; - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeDouble(trigramLambda); - out.writeDouble(bigramLambda); - out.writeDouble(unigramLambda); - } - - @Override - public LinearInterpolation readFrom(StreamInput in) throws IOException { - return new LinearInterpolation(in.readDouble(), in.readDouble(), in.readDouble()); - } - - @Override - protected boolean doEquals(SmoothingModel other) { - final LinearInterpolation otherModel = (LinearInterpolation) other; - return Objects.equals(trigramLambda, otherModel.trigramLambda) && - Objects.equals(bigramLambda, otherModel.bigramLambda) && - Objects.equals(unigramLambda, otherModel.unigramLambda); - } - - @Override - protected final int doHashCode() { - return Objects.hash(trigramLambda, bigramLambda, unigramLambda); - } - - @Override - public LinearInterpolation fromXContent(QueryParseContext parseContext) throws IOException { - XContentParser parser = parseContext.parser(); - XContentParser.Token token; - String fieldName = null; - double trigramLambda = 0.0; - double bigramLambda = 0.0; - double unigramLambda = 0.0; - ParseFieldMatcher matcher = parseContext.parseFieldMatcher(); - while ((token = parser.nextToken()) != Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } else if (token.isValue()) { - if (matcher.match(fieldName, TRIGRAM_FIELD)) { - trigramLambda = parser.doubleValue(); - if (trigramLambda < 0) { - throw new IllegalArgumentException("trigram_lambda must be positive"); - } - } else if (matcher.match(fieldName, BIGRAM_FIELD)) { - bigramLambda = parser.doubleValue(); - if (bigramLambda < 0) { - throw new 
IllegalArgumentException("bigram_lambda must be positive"); - } - } else if (matcher.match(fieldName, UNIGRAM_FIELD)) { - unigramLambda = parser.doubleValue(); - if (unigramLambda < 0) { - throw new IllegalArgumentException("unigram_lambda must be positive"); - } - } else { - throw new IllegalArgumentException( - "suggester[phrase][smoothing][linear] doesn't support field [" + fieldName + "]"); + } else if (token == Token.START_ARRAY) { + if (parseFieldMatcher.match(currentFieldName, DirectCandidateGeneratorBuilder.DIRECT_GENERATOR_FIELD)) { + // for now we only have a single type of generators + while ((token = parser.nextToken()) == Token.START_OBJECT) { + tmpSuggestion.addCandidateGenerator(DirectCandidateGeneratorBuilder.PROTOTYPE.fromXContent(parseContext)); } } else { - throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] unknown token [" + token + "] after [" + fieldName + "]"); + throw new ParsingException(parser.getTokenLocation(), + "suggester[phrase] doesn't support array field [" + currentFieldName + "]"); } + } else if (token == Token.START_OBJECT) { + if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.SMOOTHING_MODEL_FIELD)) { + ensureNoSmoothing(tmpSuggestion); + tmpSuggestion.smoothingModel(SmoothingModel.fromXContent(parseContext)); + } else if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.HIGHLIGHT_FIELD)) { + String preTag = null; + String postTag = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.PRE_TAG_FIELD)) { + preTag = parser.text(); + } else if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.POST_TAG_FIELD)) { + postTag = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), + "suggester[phrase][highlight] doesn't support field [" + currentFieldName + "]"); + } + } + } + tmpSuggestion.highlight(preTag, postTag); + } else if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.COLLATE_FIELD)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.COLLATE_QUERY_FIELD)) { + if (tmpSuggestion.collateQuery() != null) { + throw new ParsingException(parser.getTokenLocation(), + "suggester[phrase][collate] query already set, doesn't support additional [" + + currentFieldName + "]"); + } + Template template = Template.parse(parser, parseFieldMatcher); + tmpSuggestion.collateQuery(template); + } else if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.COLLATE_QUERY_PARAMS)) { + tmpSuggestion.collateParams(parser.map()); + } else if (parseFieldMatcher.match(currentFieldName, PhraseSuggestionBuilder.COLLATE_QUERY_PRUNE)) { + if (parser.isBooleanValue()) { + tmpSuggestion.collatePrune(parser.booleanValue()); + } else { + throw new ParsingException(parser.getTokenLocation(), + "suggester[phrase][collate] prune must be either 'true' or 'false'"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), + "suggester[phrase][collate] doesn't support field [" + currentFieldName + "]"); + } + } + } else { + throw new ParsingException(parser.getTokenLocation(), + "suggester[phrase] doesn't support array field [" + 
currentFieldName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), + "suggester[phrase] doesn't support field [" + currentFieldName + "]"); } - return new LinearInterpolation(trigramLambda, bigramLambda, unigramLambda); } - @Override - public WordScorerFactory buildWordScorerFactory() { - return (IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) -> - new LinearInterpoatingScorer(reader, terms, field, realWordLikelyhood, separator, trigramLambda, bigramLambda, - unigramLambda); + // now we should have field name, check and copy fields over to the suggestion builder we return + if (fieldname == null) { + throw new ElasticsearchParseException( + "the required field option [" + SuggestUtils.Fields.FIELD.getPreferredName() + "] is missing"); } + return new PhraseSuggestionBuilder(fieldname, tmpSuggestion); + } + + + @Override + public SuggestionContext build(QueryShardContext context) throws IOException { + PhraseSuggestionContext suggestionContext = new PhraseSuggestionContext(context); + MapperService mapperService = context.getMapperService(); + // copy over common settings to each suggestion builder + populateCommonFields(mapperService, suggestionContext); + + suggestionContext.setSeparator(BytesRefs.toBytesRef(this.separator)); + suggestionContext.setRealWordErrorLikelihood(this.realWordErrorLikelihood); + suggestionContext.setConfidence(this.confidence); + suggestionContext.setMaxErrors(this.maxErrors); + suggestionContext.setSeparator(BytesRefs.toBytesRef(this.separator)); + suggestionContext.setRequireUnigram(this.forceUnigrams); + suggestionContext.setTokenLimit(this.tokenLimit); + suggestionContext.setPreTag(BytesRefs.toBytesRef(this.preTag)); + suggestionContext.setPostTag(BytesRefs.toBytesRef(this.postTag)); + + if (this.gramSize != null) { + suggestionContext.setGramSize(this.gramSize); + } + + for (List candidateGenerators : this.generators.values()) { + for (CandidateGenerator candidateGenerator : candidateGenerators) { + suggestionContext.addGenerator(candidateGenerator.build(mapperService)); + } + } + + if (this.model != null) { + suggestionContext.setModel(this.model.buildWordScorerFactory()); + } + + if (this.collateQuery != null) { + CompiledScript compiledScript = context.getScriptService().compile(this.collateQuery, ScriptContext.Standard.SEARCH, + Collections.emptyMap()); + suggestionContext.setCollateQueryScript(compiledScript); + if (this.collateParams != null) { + suggestionContext.setCollateScriptParams(this.collateParams); + } + suggestionContext.setCollatePrune(this.collatePrune); + } + + if (suggestionContext.model() == null) { + suggestionContext.setModel(StupidBackoffScorer.FACTORY); + } + + if (this.gramSize == null || suggestionContext.generators().isEmpty()) { + final ShingleTokenFilterFactory.Factory shingleFilterFactory = SuggestUtils + .getShingleFilterFactory(suggestionContext.getAnalyzer()); + if (this.gramSize == null) { + // try to detect the shingle size + if (shingleFilterFactory != null) { + suggestionContext.setGramSize(shingleFilterFactory.getMaxShingleSize()); + if (suggestionContext.getAnalyzer() == null && shingleFilterFactory.getMinShingleSize() > 1 + && !shingleFilterFactory.getOutputUnigrams()) { + throw new IllegalArgumentException("The default analyzer for field: [" + suggestionContext.getField() + + "] doesn't emit unigrams. 
If this is intentional try to set the analyzer explicitly"); + } + } + } + if (suggestionContext.generators().isEmpty()) { + if (shingleFilterFactory != null && shingleFilterFactory.getMinShingleSize() > 1 + && !shingleFilterFactory.getOutputUnigrams() && suggestionContext.getRequireUnigram()) { + throw new IllegalArgumentException("The default candidate generator for phrase suggest can't operate on field: [" + + suggestionContext.getField() + "] since it doesn't emit unigrams. " + + "If this is intentional try to set the candidate generator field explicitly"); + } + // use a default generator on the same field + DirectCandidateGenerator generator = new DirectCandidateGenerator(); + generator.setField(suggestionContext.getField()); + suggestionContext.addGenerator(generator); + } + } + return suggestionContext; + } + + private static void ensureNoSmoothing(PhraseSuggestionBuilder suggestion) { + if (suggestion.smoothingModel() != null) { + throw new IllegalArgumentException("only one smoothing model supported"); + } + } + + @Override + public String getWriteableName() { + return SUGGESTION_NAME; + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + out.writeFloat(maxErrors); + out.writeFloat(realWordErrorLikelihood); + out.writeFloat(confidence); + out.writeOptionalVInt(gramSize); + boolean hasModel = model != null; + out.writeBoolean(hasModel); + if (hasModel) { + out.writePhraseSuggestionSmoothingModel(model); + } + out.writeBoolean(forceUnigrams); + out.writeVInt(tokenLimit); + out.writeOptionalString(preTag); + out.writeOptionalString(postTag); + out.writeString(separator); + if (collateQuery != null) { + out.writeBoolean(true); + collateQuery.writeTo(out); + } else { + out.writeBoolean(false); + } + out.writeMap(collateParams); + out.writeOptionalBoolean(collatePrune); + out.writeVInt(this.generators.size()); + for (Entry> entry : this.generators.entrySet()) { + out.writeString(entry.getKey()); + List generatorsList = entry.getValue(); + out.writeVInt(generatorsList.size()); + for (CandidateGenerator generator : generatorsList) { + generator.writeTo(out); + } + } + } + + @Override + public PhraseSuggestionBuilder doReadFrom(StreamInput in, String field) throws IOException { + PhraseSuggestionBuilder builder = new PhraseSuggestionBuilder(field); + builder.maxErrors = in.readFloat(); + builder.realWordErrorLikelihood = in.readFloat(); + builder.confidence = in.readFloat(); + builder.gramSize = in.readOptionalVInt(); + if (in.readBoolean()) { + builder.model = in.readPhraseSuggestionSmoothingModel(); + } + builder.forceUnigrams = in.readBoolean(); + builder.tokenLimit = in.readVInt(); + builder.preTag = in.readOptionalString(); + builder.postTag = in.readOptionalString(); + builder.separator = in.readString(); + if (in.readBoolean()) { + builder.collateQuery = Template.readTemplate(in); + } + builder.collateParams = in.readMap(); + builder.collatePrune = in.readOptionalBoolean(); + int generatorsEntries = in.readVInt(); + for (int i = 0; i < generatorsEntries; i++) { + String type = in.readString(); + int numberOfGenerators = in.readVInt(); + List generatorsList = new ArrayList<>(numberOfGenerators); + for (int g = 0; g < numberOfGenerators; g++) { + DirectCandidateGeneratorBuilder generator = DirectCandidateGeneratorBuilder.PROTOTYPE.readFrom(in); + generatorsList.add(generator); + } + builder.generators.put(type, generatorsList); + } + return builder; + } + + @Override + protected boolean doEquals(PhraseSuggestionBuilder other) { + return 
Objects.equals(maxErrors, other.maxErrors) && + Objects.equals(separator, other.separator) && + Objects.equals(realWordErrorLikelihood, other.realWordErrorLikelihood) && + Objects.equals(confidence, other.confidence) && + Objects.equals(generators, other.generators) && + Objects.equals(gramSize, other.gramSize) && + Objects.equals(model, other.model) && + Objects.equals(forceUnigrams, other.forceUnigrams) && + Objects.equals(tokenLimit, other.tokenLimit) && + Objects.equals(preTag, other.preTag) && + Objects.equals(postTag, other.postTag) && + Objects.equals(collateQuery, other.collateQuery) && + Objects.equals(collateParams, other.collateParams) && + Objects.equals(collatePrune, other.collatePrune); + } + + @Override + protected int doHashCode() { + return Objects.hash(maxErrors, separator, realWordErrorLikelihood, confidence, + generators, gramSize, model, forceUnigrams, tokenLimit, preTag, postTag, + collateQuery, collateParams, collatePrune); } /** * {@link CandidateGenerator} interface. */ - public interface CandidateGenerator extends ToXContent { + public interface CandidateGenerator extends Writeable, ToXContent { String getType(); CandidateGenerator fromXContent(QueryParseContext parseContext) throws IOException; + + PhraseSuggestionContext.DirectCandidateGenerator build(MapperService mapperService) throws IOException; } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java index 736b297fab7..80ac850a38c 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java @@ -20,9 +20,9 @@ package org.elasticsearch.search.suggest.phrase; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.search.suggest.DirectSpellcheckerSettings; -import org.elasticsearch.search.suggest.Suggester; import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; import java.util.ArrayList; @@ -31,26 +31,31 @@ import java.util.List; import java.util.Map; class PhraseSuggestionContext extends SuggestionContext { - private final BytesRef SEPARATOR = new BytesRef(" "); - private float maxErrors = 0.5f; - private BytesRef separator = SEPARATOR; - private float realworldErrorLikelihood = 0.95f; - private List generators = new ArrayList<>(); - private int gramSize = 1; - private float confidence = 1.0f; + static final boolean DEFAULT_COLLATE_PRUNE = false; + static final boolean DEFAULT_REQUIRE_UNIGRAM = true; + static final float DEFAULT_CONFIDENCE = 1.0f; + static final int DEFAULT_GRAM_SIZE = 1; + static final float DEFAULT_RWE_ERRORLIKELIHOOD = 0.95f; + static final float DEFAULT_MAX_ERRORS = 0.5f; + static final String DEFAULT_SEPARATOR = " "; + + private float maxErrors = DEFAULT_MAX_ERRORS; + private BytesRef separator = new BytesRef(DEFAULT_SEPARATOR); + private float realworldErrorLikelihood = DEFAULT_RWE_ERRORLIKELIHOOD; + private int gramSize = DEFAULT_GRAM_SIZE; + private float confidence = DEFAULT_CONFIDENCE; private int tokenLimit = NoisyChannelSpellChecker.DEFAULT_TOKEN_LIMIT; + private boolean requireUnigram = DEFAULT_REQUIRE_UNIGRAM; private BytesRef preTag; private BytesRef postTag; private CompiledScript collateQueryScript; + private boolean prune = 
DEFAULT_COLLATE_PRUNE; + private List generators = new ArrayList<>(); private Map collateScriptParams = new HashMap<>(1); - private WordScorer.WordScorerFactory scorer; - private boolean requireUnigram = true; - private boolean prune = false; - - public PhraseSuggestionContext(Suggester suggester) { - super(suggester); + public PhraseSuggestionContext(QueryShardContext shardContext) { + super(PhraseSuggester.PROTOTYPE, shardContext); } public float maxErrors() { @@ -149,8 +154,6 @@ class PhraseSuggestionContext extends SuggestionContext { public void postFilter(Analyzer postFilter) { this.postFilter = postFilter; } - - } public void setRequireUnigram(boolean requireUnigram) { @@ -198,7 +201,7 @@ class PhraseSuggestionContext extends SuggestionContext { } void setCollateScriptParams(Map collateScriptParams) { - this.collateScriptParams = collateScriptParams; + this.collateScriptParams = new HashMap<>(collateScriptParams); } void setCollatePrune(boolean prune) { @@ -208,5 +211,4 @@ class PhraseSuggestionContext extends SuggestionContext { boolean collatePrune() { return prune; } - } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/SmoothingModel.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/SmoothingModel.java new file mode 100644 index 00000000000..0163c560de4 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/SmoothingModel.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.suggest.phrase; + +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.search.suggest.phrase.WordScorer.WordScorerFactory; + +import java.io.IOException; + +public abstract class SmoothingModel implements NamedWriteable, ToXContent { + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(getWriteableName()); + innerToXContent(builder,params); + builder.endObject(); + return builder; + } + + @Override + public final boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + SmoothingModel other = (SmoothingModel) obj; + return doEquals(other); + } + + @Override + public final int hashCode() { + /* + * Override hashCode here and forward to an abstract method to force + * extensions of this class to override hashCode in the same way that we + * force them to override equals. This also prevents false positives in + * CheckStyle's EqualsHashCode check. + */ + return doHashCode(); + } + + protected abstract int doHashCode(); + + public static SmoothingModel fromXContent(QueryParseContext parseContext) throws IOException { + XContentParser parser = parseContext.parser(); + ParseFieldMatcher parseFieldMatcher = parseContext.parseFieldMatcher(); + XContentParser.Token token; + String fieldName = null; + SmoothingModel model = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if (parseFieldMatcher.match(fieldName, LinearInterpolation.PARSE_FIELD)) { + model = LinearInterpolation.PROTOTYPE.innerFromXContent(parseContext); + } else if (parseFieldMatcher.match(fieldName, Laplace.PARSE_FIELD)) { + model = Laplace.PROTOTYPE.innerFromXContent(parseContext); + } else if (parseFieldMatcher.match(fieldName, StupidBackoff.PARSE_FIELD)) { + model = StupidBackoff.PROTOTYPE.innerFromXContent(parseContext); + } else { + throw new IllegalArgumentException("suggester[phrase] doesn't support object field [" + fieldName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), + "[smoothing] unknown token [" + token + "] after [" + fieldName + "]"); + } + } + return model; + } + + public abstract SmoothingModel innerFromXContent(QueryParseContext parseContext) throws IOException; + + public abstract WordScorerFactory buildWordScorerFactory(); + + /** + * subtype specific implementation of "equals". 
+ */ + protected abstract boolean doEquals(SmoothingModel other); + + protected abstract XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException; +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java new file mode 100644 index 00000000000..951f7f917ff --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java @@ -0,0 +1,129 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.suggest.phrase; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.Terms; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.search.suggest.phrase.WordScorer.WordScorerFactory; + +import java.io.IOException; +import java.util.Objects; + +/** + * A "stupid-backoff" smoothing model similar to Katz's + * Backoff. This model is used as the default if no model is configured. + *

+ * See N-Gram Smoothing for details.
    + */ +public final class StupidBackoff extends SmoothingModel { + /** + * Default discount parameter for {@link StupidBackoff} smoothing + */ + public static final double DEFAULT_BACKOFF_DISCOUNT = 0.4; + public static final StupidBackoff PROTOTYPE = new StupidBackoff(DEFAULT_BACKOFF_DISCOUNT); + private double discount = DEFAULT_BACKOFF_DISCOUNT; + private static final String NAME = "stupid_backoff"; + private static final ParseField DISCOUNT_FIELD = new ParseField("discount"); + static final ParseField PARSE_FIELD = new ParseField(NAME); + + /** + * Creates a Stupid-Backoff smoothing model. + * + * @param discount + * the discount given to lower order ngrams if the higher order ngram doesn't exits + */ + public StupidBackoff(double discount) { + this.discount = discount; + } + + /** + * @return the discount parameter of the model + */ + public double getDiscount() { + return this.discount; + } + + @Override + protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(DISCOUNT_FIELD.getPreferredName(), discount); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeDouble(discount); + } + + @Override + public StupidBackoff readFrom(StreamInput in) throws IOException { + return new StupidBackoff(in.readDouble()); + } + + @Override + protected boolean doEquals(SmoothingModel other) { + StupidBackoff otherModel = (StupidBackoff) other; + return Objects.equals(discount, otherModel.discount); + } + + @Override + protected final int doHashCode() { + return Objects.hash(discount); + } + + @Override + public SmoothingModel innerFromXContent(QueryParseContext parseContext) throws IOException { + XContentParser parser = parseContext.parser(); + XContentParser.Token token; + String fieldName = null; + double discount = DEFAULT_BACKOFF_DISCOUNT; + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } + if (token.isValue() && parseContext.parseFieldMatcher().match(fieldName, DISCOUNT_FIELD)) { + discount = parser.doubleValue(); + } + } + return new StupidBackoff(discount); + } + + @Override + public WordScorerFactory buildWordScorerFactory() { + return (IndexReader reader, Terms terms, String field, double realWordLikelyhood, BytesRef separator) + -> new StupidBackoffScorer(reader, terms, field, realWordLikelyhood, separator, discount); + } +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java index 9557715bcb9..69e62c1a175 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java @@ -77,7 +77,7 @@ public abstract class WordScorer { } return candidate.stringDistance; } - + public double score(Candidate[] path, CandidateSet[] candidateSet, int at, int gramSize) throws IOException { if (at == 0 || gramSize == 1) { return Math.log10(channelScore(path[at], candidateSet[at].originalTerm) * scoreUnigram(path[at])); @@ -87,21 +87,21 @@ public abstract class WordScorer { return Math.log10(channelScore(path[at], candidateSet[at].originalTerm) * scoreTrigram(path[at], path[at - 1], path[at - 2])); } } - + protected double scoreUnigram(Candidate word) throws IOException { return (1.0 + 
frequency(word.term)) / (vocabluarySize + numTerms); } - + protected double scoreBigram(Candidate word, Candidate w_1) throws IOException { return scoreUnigram(word); } - + protected double scoreTrigram(Candidate word, Candidate w_1, Candidate w_2) throws IOException { return scoreBigram(word, w_1); } - public static interface WordScorerFactory { - public WordScorer newScorer(IndexReader reader, Terms terms, - String field, double realWordLikelyhood, BytesRef separator) throws IOException; + public interface WordScorerFactory { + WordScorer newScorer(IndexReader reader, Terms terms, + String field, double realWordLikelyhood, BytesRef separator) throws IOException; } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestParser.java b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestParser.java deleted file mode 100644 index a2fd680c215..00000000000 --- a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestParser.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.search.suggest.term; - -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.search.suggest.DirectSpellcheckerSettings; -import org.elasticsearch.search.suggest.SuggestContextParser; -import org.elasticsearch.search.suggest.SuggestUtils; -import org.elasticsearch.search.suggest.SuggestionSearchContext; - -import java.io.IOException; - -public final class TermSuggestParser implements SuggestContextParser { - - private TermSuggester suggester; - - public TermSuggestParser(TermSuggester suggester) { - this.suggester = suggester; - } - - @Override - public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, IndexFieldDataService fieldDataService) throws IOException { - XContentParser.Token token; - String fieldName = null; - TermSuggestionContext suggestion = new TermSuggestionContext(suggester); - DirectSpellcheckerSettings settings = suggestion.getDirectSpellCheckerSettings(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - fieldName = parser.currentName(); - } else if (token.isValue()) { - parseTokenValue(parser, mapperService, fieldName, suggestion, settings, mapperService.getIndexSettings().getParseFieldMatcher()); - } else { - throw new IllegalArgumentException("suggester[term] doesn't support field [" + fieldName + "]"); - } - } - return suggestion; - } - - private void parseTokenValue(XContentParser parser, MapperService mapperService, String fieldName, TermSuggestionContext suggestion, - DirectSpellcheckerSettings settings, ParseFieldMatcher parseFieldMatcher) throws IOException { - if (!(SuggestUtils.parseSuggestContext(parser, mapperService, fieldName, suggestion, parseFieldMatcher) || SuggestUtils.parseDirectSpellcheckerSettings( - parser, fieldName, settings, parseFieldMatcher))) { - throw new IllegalArgumentException("suggester[term] doesn't support [" + fieldName + "]"); - - } - } - -} diff --git a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java index 34cd3ad4d56..4bffb2dfe86 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java @@ -28,9 +28,9 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.text.Text; -import org.elasticsearch.search.suggest.SuggestContextParser; import org.elasticsearch.search.suggest.SuggestUtils; import org.elasticsearch.search.suggest.Suggester; +import org.elasticsearch.search.suggest.SuggestionBuilder; import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; import java.io.IOException; @@ -39,8 +39,11 @@ import java.util.List; public final class TermSuggester extends Suggester { + public static final TermSuggester PROTOTYPE = new TermSuggester(); + @Override - public TermSuggestion innerExecute(String name, TermSuggestionContext suggestion, IndexSearcher searcher, CharsRefBuilder spare) throws IOException { + public TermSuggestion innerExecute(String name, TermSuggestionContext suggestion, IndexSearcher searcher, CharsRefBuilder 
spare) + throws IOException { DirectSpellChecker directSpellChecker = SuggestUtils.getDirectSpellChecker(suggestion.getDirectSpellCheckerSettings()); final IndexReader indexReader = searcher.getIndexReader(); TermSuggestion response = new TermSuggestion( @@ -63,12 +66,6 @@ public final class TermSuggester extends Suggester { return response; } - @Override - public SuggestContextParser getContextParser() { - return new TermSuggestParser(this); - } - - private List queryTerms(SuggestionContext suggestion, CharsRefBuilder spare) throws IOException { final List result = new ArrayList<>(); final String field = suggestion.getField(); @@ -76,7 +73,7 @@ public final class TermSuggester extends Suggester { @Override public void nextToken() { Term term = new Term(field, BytesRef.deepCopyOf(fillBytesRef(new BytesRefBuilder()))); - result.add(new Token(term, offsetAttr.startOffset(), offsetAttr.endOffset())); + result.add(new Token(term, offsetAttr.startOffset(), offsetAttr.endOffset())); } }, spare); return result; @@ -96,4 +93,9 @@ public final class TermSuggester extends Suggester { } + @Override + public SuggestionBuilder getBuilderPrototype() { + return TermSuggestionBuilder.PROTOTYPE; + } + } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestion.java b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestion.java index 12f2c93153b..bc4006469ad 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestion.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestion.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; +import org.elasticsearch.search.suggest.SortBy; import org.elasticsearch.search.suggest.Suggest.Suggestion; import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option; @@ -37,6 +38,17 @@ public class TermSuggestion extends Suggestion { public static final Comparator SCORE = new Score(); public static final Comparator FREQUENCY = new Frequency(); + public static final int TYPE = 1; + + private SortBy sort; + + public TermSuggestion() { + } + + public TermSuggestion(String name, int size, SortBy sort) { + super(name, size); + this.sort = sort; + } // Same behaviour as comparators in suggest module, but for SuggestedWord // Highest score first, then highest freq first, then lowest term first @@ -79,17 +91,6 @@ public class TermSuggestion extends Suggestion { } - public static final int TYPE = 1; - private Sort sort; - - public TermSuggestion() { - } - - public TermSuggestion(String name, int size, Sort sort) { - super(name, size); - this.sort = sort; - } - @Override public int getType() { return TYPE; @@ -110,13 +111,13 @@ public class TermSuggestion extends Suggestion { @Override protected void innerReadFrom(StreamInput in) throws IOException { super.innerReadFrom(in); - sort = Sort.fromId(in.readByte()); + sort = SortBy.PROTOTYPE.readFrom(in); } @Override public void innerWriteTo(StreamOutput out) throws IOException { super.innerWriteTo(out); - out.writeByte(sort.id()); + sort.writeTo(out); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java index 03eb388f003..0cb9d1604a4 100644 --- 
a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java @@ -16,11 +16,52 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.search.suggest.term; + +import org.apache.lucene.search.spell.DirectSpellChecker; +import org.apache.lucene.search.spell.JaroWinklerDistance; +import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.search.spell.LuceneLevenshteinDistance; +import org.apache.lucene.search.spell.NGramDistance; +import org.apache.lucene.search.spell.StringDistance; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.search.suggest.DirectSpellcheckerSettings; +import org.elasticsearch.search.suggest.SortBy; +import org.elasticsearch.search.suggest.SuggestUtils; +import org.elasticsearch.search.suggest.SuggestionBuilder; +import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_ACCURACY; +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_MAX_EDITS; +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_MAX_INSPECTIONS; +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_MAX_TERM_FREQ; +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_MIN_DOC_FREQ; +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_MIN_WORD_LENGTH; +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_PREFIX_LENGTH; +import static org.elasticsearch.search.suggest.SuggestUtils.Fields.ACCURACY; +import static org.elasticsearch.search.suggest.SuggestUtils.Fields.MAX_EDITS; +import static org.elasticsearch.search.suggest.SuggestUtils.Fields.MAX_INSPECTIONS; +import static org.elasticsearch.search.suggest.SuggestUtils.Fields.MAX_TERM_FREQ; +import static org.elasticsearch.search.suggest.SuggestUtils.Fields.MIN_DOC_FREQ; +import static org.elasticsearch.search.suggest.SuggestUtils.Fields.MIN_WORD_LENGTH; +import static org.elasticsearch.search.suggest.SuggestUtils.Fields.PREFIX_LENGTH; +import static org.elasticsearch.search.suggest.SuggestUtils.Fields.SORT; +import static org.elasticsearch.search.suggest.SuggestUtils.Fields.STRING_DISTANCE; +import static org.elasticsearch.search.suggest.SuggestUtils.Fields.SUGGEST_MODE; /** * Defines the actual suggest command. 
Each command uses the global options @@ -29,23 +70,39 @@ import java.io.IOException; */ public class TermSuggestionBuilder extends SuggestionBuilder { - private String suggestMode; - private Float accuracy; - private String sort; - private String stringDistance; - private Integer maxEdits; - private Integer maxInspections; - private Float maxTermFreq; - private Integer prefixLength; - private Integer minWordLength; - private Float minDocFreq; - + public static final TermSuggestionBuilder PROTOTYPE = new TermSuggestionBuilder("_na_"); + private static final String SUGGESTION_NAME = "term"; + + private SuggestMode suggestMode = SuggestMode.MISSING; + private float accuracy = DEFAULT_ACCURACY; + private SortBy sort = SortBy.SCORE; + private StringDistanceImpl stringDistance = StringDistanceImpl.INTERNAL; + private int maxEdits = DEFAULT_MAX_EDITS; + private int maxInspections = DEFAULT_MAX_INSPECTIONS; + private float maxTermFreq = DEFAULT_MAX_TERM_FREQ; + private int prefixLength = DEFAULT_PREFIX_LENGTH; + private int minWordLength = DEFAULT_MIN_WORD_LENGTH; + private float minDocFreq = DEFAULT_MIN_DOC_FREQ; + + public TermSuggestionBuilder(String field) { + super(field); + } + /** - * @param name - * The name of this suggestion. This is a required parameter. + * internal copy constructor that copies over all class fields except the field name. */ - public TermSuggestionBuilder(String name) { - super(name, "term"); + private TermSuggestionBuilder(String field, TermSuggestionBuilder in) { + super(field, in); + suggestMode = in.suggestMode; + accuracy = in.accuracy; + sort = in.sort; + stringDistance = in.stringDistance; + maxEdits = in.maxEdits; + maxInspections = in.maxInspections; + maxTermFreq = in.maxTermFreq; + prefixLength = in.prefixLength; + minWordLength = in.minWordLength; + minDocFreq = in.minDocFreq; } /** @@ -61,11 +118,19 @@ public class TermSuggestionBuilder extends SuggestionBuilder */ - public TermSuggestionBuilder suggestMode(String suggestMode) { + public TermSuggestionBuilder suggestMode(SuggestMode suggestMode) { + Objects.requireNonNull(suggestMode, "suggestMode must not be null"); this.suggestMode = suggestMode; return this; } + /** + * Get the suggest mode setting. + */ + public SuggestMode suggestMode() { + return suggestMode; + } + /** * Sets how similar the suggested terms at least need to be compared to the * original suggest text tokens. A value between 0 and 1 can be specified. @@ -74,11 +139,21 @@ public class TermSuggestionBuilder extends SuggestionBuilder * Default is 0.5 */ - public TermSuggestionBuilder setAccuracy(float accuracy) { + public TermSuggestionBuilder accuracy(float accuracy) { + if (accuracy < 0.0f || accuracy > 1.0f) { + throw new IllegalArgumentException("accuracy must be between 0 and 1"); + } this.accuracy = accuracy; return this; } + /** + * Get the accuracy setting. + */ + public float accuracy() { + return accuracy; + } + /** * Sets how to sort the suggest terms per suggest text token. Two possible * values: @@ -86,19 +161,27 @@ public class TermSuggestionBuilder • score - Sort should first be based on score, then * document frequency and then the term itself. *
  • frequency - Sort should first be based on document - * frequency, then scotr and then the term itself. + * frequency, then score and then the term itself. * *

    * What the score is depends on the suggester being used. */ - public TermSuggestionBuilder sort(String sort) { + public TermSuggestionBuilder sort(SortBy sort) { + Objects.requireNonNull(sort, "sort must not be null"); this.sort = sort; return this; } + /** + * Get the sort setting. + */ + public SortBy sort() { + return sort; + }
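Because sort is now the SortBy enum introduced elsewhere in this change rather than a free-form string, invalid values fail fast. A minimal sketch of the typed setter (builder stands in for any TermSuggestionBuilder instance; the REST-level strings remain the existing "score"/"frequency" values resolved during parsing):

    builder.sort(SortBy.FREQUENCY);          // typed, no string typos possible
    builder.sort(SortBy.resolve("score"));   // how the parser maps the REST value to the enum
    // builder.sort(null);  -> NullPointerException("sort must not be null")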
 + /** * Sets what string distance implementation to use for comparing how similar - * suggested terms are. Four possible values can be specified: + * suggested terms are. Five possible values can be specified: *
 
      *
    1. internal - This is the default and is based on * damerau_levenshtein, but highly optimized for comparing @@ -113,32 +196,60 @@ public class TermSuggestionBuilder */ - public TermSuggestionBuilder stringDistance(String stringDistance) { + public TermSuggestionBuilder stringDistance(StringDistanceImpl stringDistance) { + Objects.requireNonNull(stringDistance, "stringDistance must not be null"); this.stringDistance = stringDistance; return this; } + /** + * Get the string distance implementation setting. + */ + public StringDistanceImpl stringDistance() { + return stringDistance; + } + /** * Sets the maximum edit distance candidate suggestions can have in order to * be considered as a suggestion. Can only be a value between 1 and 2. Any * other value results in a bad request error being thrown. Defaults to * 2. */ - public TermSuggestionBuilder maxEdits(Integer maxEdits) { + public TermSuggestionBuilder maxEdits(int maxEdits) { + if (maxEdits < 1 || maxEdits > 2) { + throw new IllegalArgumentException("maxEdits must be between 1 and 2"); + } this.maxEdits = maxEdits; return this; } + /** + * Get the maximum edit distance setting. + */ + public int maxEdits() { + return maxEdits; + } + /** * A factor that is used to multiply with the size in order to inspect more * candidate suggestions. Can improve accuracy at the cost of performance. * Defaults to 5. */ - public TermSuggestionBuilder maxInspections(Integer maxInspections) { + public TermSuggestionBuilder maxInspections(int maxInspections) { + if (maxInspections < 0) { + throw new IllegalArgumentException("maxInspections must not be negative"); + } this.maxInspections = maxInspections; return this; } + /** + * Get the factor for inspecting more candidate suggestions setting. + */ + public int maxInspections() { + return maxInspections; + } + /** * Sets a maximum threshold in number of documents a suggest text token can * exist in order to be corrected. Can be a relative percentage number (e.g @@ -151,10 +262,23 @@ public class TermSuggestionBuilder + public TermSuggestionBuilder maxTermFreq(float maxTermFreq) { + if (maxTermFreq > 1.0f && maxTermFreq != Math.floor(maxTermFreq)) { + throw new IllegalArgumentException("if maxTermFreq is greater than 1, it must not be a fraction"); + } this.maxTermFreq = maxTermFreq; return this; } + /** + * Get the maximum term frequency threshold setting. + */ + public float maxTermFreq() { + return maxTermFreq; + } + /** * Sets the number of minimal prefix characters that must match in order to be * a candidate suggestion. Defaults to 1. Increasing this number improves @@ -162,19 +286,39 @@ public class TermSuggestionBuilder 4. */ public TermSuggestionBuilder minWordLength(int minWordLength) { + if (minWordLength < 1) { + throw new IllegalArgumentException("minWordLength must be greater or equal to 1"); + } this.minWordLength = minWordLength; return this; } + /** + * Get the minimum length of a text term to be corrected setting. + */ + public int minWordLength() { + return minWordLength; + } + /** * Sets a minimal threshold in number of documents a suggested term should * appear in. 
This can be specified as an absolute number or as a relative @@ -183,42 +327,294 @@ public class TermSuggestionBuilder extends SuggestionBuilder 1.0f && minDocFreq != Math.floor(minDocFreq)) { + throw new IllegalArgumentException("if minDocFreq is greater than 1, it must not be a fraction"); + } this.minDocFreq = minDocFreq; return this; } + /** + * Get the minimal threshold for the frequency of a term appearing in the + * document set setting. + */ + public float minDocFreq() { + return minDocFreq; + } + @Override public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { - if (suggestMode != null) { - builder.field("suggest_mode", suggestMode); - } - if (accuracy != null) { - builder.field("accuracy", accuracy); - } - if (sort != null) { - builder.field("sort", sort); - } - if (stringDistance != null) { - builder.field("string_distance", stringDistance); - } - if (maxEdits != null) { - builder.field("max_edits", maxEdits); - } - if (maxInspections != null) { - builder.field("max_inspections", maxInspections); - } - if (maxTermFreq != null) { - builder.field("max_term_freq", maxTermFreq); - } - if (prefixLength != null) { - builder.field("prefix_length", prefixLength); - } - if (minWordLength != null) { - builder.field("min_word_length", minWordLength); - } - if (minDocFreq != null) { - builder.field("min_doc_freq", minDocFreq); - } + builder.field(SUGGEST_MODE.getPreferredName(), suggestMode); + builder.field(ACCURACY.getPreferredName(), accuracy); + builder.field(SORT.getPreferredName(), sort); + builder.field(STRING_DISTANCE.getPreferredName(), stringDistance); + builder.field(MAX_EDITS.getPreferredName(), maxEdits); + builder.field(MAX_INSPECTIONS.getPreferredName(), maxInspections); + builder.field(MAX_TERM_FREQ.getPreferredName(), maxTermFreq); + builder.field(PREFIX_LENGTH.getPreferredName(), prefixLength); + builder.field(MIN_WORD_LENGTH.getPreferredName(), minWordLength); + builder.field(MIN_DOC_FREQ.getPreferredName(), minDocFreq); return builder; } + + @Override + protected TermSuggestionBuilder innerFromXContent(QueryParseContext parseContext) throws IOException { + XContentParser parser = parseContext.parser(); + TermSuggestionBuilder tmpSuggestion = new TermSuggestionBuilder("_na_"); + ParseFieldMatcher parseFieldMatcher = parseContext.parseFieldMatcher(); + XContentParser.Token token; + String currentFieldName = null; + String fieldname = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.ANALYZER_FIELD)) { + tmpSuggestion.analyzer(parser.text()); + } else if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.FIELDNAME_FIELD)) { + fieldname = parser.text(); + } else if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.SIZE_FIELD)) { + tmpSuggestion.size(parser.intValue()); + } else if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.SHARDSIZE_FIELD)) { + tmpSuggestion.shardSize(parser.intValue()); + } else if (parseFieldMatcher.match(currentFieldName, SUGGEST_MODE)) { + tmpSuggestion.suggestMode(SuggestMode.resolve(parser.text())); + } else if (parseFieldMatcher.match(currentFieldName, ACCURACY)) { + tmpSuggestion.accuracy(parser.floatValue()); + } else if (parseFieldMatcher.match(currentFieldName, SORT)) { + tmpSuggestion.sort(SortBy.resolve(parser.text())); + } else if 
(parseFieldMatcher.match(currentFieldName, STRING_DISTANCE)) { + tmpSuggestion.stringDistance(StringDistanceImpl.resolve(parser.text())); + } else if (parseFieldMatcher.match(currentFieldName, MAX_EDITS)) { + tmpSuggestion.maxEdits(parser.intValue()); + } else if (parseFieldMatcher.match(currentFieldName, MAX_INSPECTIONS)) { + tmpSuggestion.maxInspections(parser.intValue()); + } else if (parseFieldMatcher.match(currentFieldName, MAX_TERM_FREQ)) { + tmpSuggestion.maxTermFreq(parser.floatValue()); + } else if (parseFieldMatcher.match(currentFieldName, PREFIX_LENGTH)) { + tmpSuggestion.prefixLength(parser.intValue()); + } else if (parseFieldMatcher.match(currentFieldName, MIN_WORD_LENGTH)) { + tmpSuggestion.minWordLength(parser.intValue()); + } else if (parseFieldMatcher.match(currentFieldName, MIN_DOC_FREQ)) { + tmpSuggestion.minDocFreq(parser.floatValue()); + } else { + throw new ParsingException(parser.getTokenLocation(), + "suggester[term] doesn't support field [" + currentFieldName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "suggester[term] parsing failed on [" + currentFieldName + "]"); + } + } + + // now we should have field name, check and copy fields over to the suggestion builder we return + if (fieldname == null) { + throw new ElasticsearchParseException( + "the required field option [" + SuggestUtils.Fields.FIELD.getPreferredName() + "] is missing"); + } + return new TermSuggestionBuilder(fieldname, tmpSuggestion); + } + + @Override + public SuggestionContext build(QueryShardContext context) throws IOException { + TermSuggestionContext suggestionContext = new TermSuggestionContext(context); + // copy over common settings to each suggestion builder + populateCommonFields(context.getMapperService(), suggestionContext); + // Transfers the builder settings to the target TermSuggestionContext + DirectSpellcheckerSettings settings = suggestionContext.getDirectSpellCheckerSettings(); + settings.accuracy(accuracy); + settings.maxEdits(maxEdits); + settings.maxInspections(maxInspections); + settings.maxTermFreq(maxTermFreq); + settings.minDocFreq(minDocFreq); + settings.minWordLength(minWordLength); + settings.prefixLength(prefixLength); + settings.sort(sort); + settings.stringDistance(stringDistance.toLucene()); + settings.suggestMode(suggestMode.toLucene()); + return suggestionContext; + } + + @Override + public String getWriteableName() { + return SUGGESTION_NAME; + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + suggestMode.writeTo(out); + out.writeFloat(accuracy); + sort.writeTo(out); + stringDistance.writeTo(out); + out.writeVInt(maxEdits); + out.writeVInt(maxInspections); + out.writeFloat(maxTermFreq); + out.writeVInt(prefixLength); + out.writeVInt(minWordLength); + out.writeFloat(minDocFreq); + } + + @Override + public TermSuggestionBuilder doReadFrom(StreamInput in, String field) throws IOException { + TermSuggestionBuilder builder = new TermSuggestionBuilder(field); + builder.suggestMode = SuggestMode.PROTOTYPE.readFrom(in); + builder.accuracy = in.readFloat(); + builder.sort = SortBy.PROTOTYPE.readFrom(in); + builder.stringDistance = StringDistanceImpl.PROTOTYPE.readFrom(in); + builder.maxEdits = in.readVInt(); + builder.maxInspections = in.readVInt(); + builder.maxTermFreq = in.readFloat(); + builder.prefixLength = in.readVInt(); + builder.minWordLength = in.readVInt(); + builder.minDocFreq = in.readFloat(); + return builder; + } + + @Override + protected boolean doEquals(TermSuggestionBuilder other) { + 
return Objects.equals(suggestMode, other.suggestMode) && + Objects.equals(accuracy, other.accuracy) && + Objects.equals(sort, other.sort) && + Objects.equals(stringDistance, other.stringDistance) && + Objects.equals(maxEdits, other.maxEdits) && + Objects.equals(maxInspections, other.maxInspections) && + Objects.equals(maxTermFreq, other.maxTermFreq) && + Objects.equals(prefixLength, other.prefixLength) && + Objects.equals(minWordLength, other.minWordLength) && + Objects.equals(minDocFreq, other.minDocFreq); + } + + @Override + protected int doHashCode() { + return Objects.hash(suggestMode, accuracy, sort, stringDistance, maxEdits, maxInspections, + maxTermFreq, prefixLength, minWordLength, minDocFreq); + } + + /** An enum representing the valid suggest modes. */ + public enum SuggestMode implements Writeable { + /** Only suggest terms in the suggest text that aren't in the index. This is the default. */ + MISSING { + @Override + public org.apache.lucene.search.spell.SuggestMode toLucene() { + return org.apache.lucene.search.spell.SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX; + } + }, + /** Only suggest terms that occur in more docs than the original suggest text term. */ + POPULAR { + @Override + public org.apache.lucene.search.spell.SuggestMode toLucene() { + return org.apache.lucene.search.spell.SuggestMode.SUGGEST_MORE_POPULAR; + } + }, + /** Suggest any matching suggest terms based on tokens in the suggest text. */ + ALWAYS { + @Override + public org.apache.lucene.search.spell.SuggestMode toLucene() { + return org.apache.lucene.search.spell.SuggestMode.SUGGEST_ALWAYS; + } + }; + + protected static SuggestMode PROTOTYPE = MISSING; + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeVInt(ordinal()); + } + + @Override + public SuggestMode readFrom(final StreamInput in) throws IOException { + int ordinal = in.readVInt(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IOException("Unknown SuggestMode ordinal [" + ordinal + "]"); + } + return values()[ordinal]; + } + + public static SuggestMode resolve(final String str) { + Objects.requireNonNull(str, "Input string is null"); + return valueOf(str.toUpperCase(Locale.ROOT)); + } + + public abstract org.apache.lucene.search.spell.SuggestMode toLucene(); + } + + /** An enum representing the valid string edit distance algorithms for determining suggestions. */ + public enum StringDistanceImpl implements Writeable { + /** This is the default and is based on damerau_levenshtein, but highly optimized + * for comparing string distance for terms inside the index. */ + INTERNAL { + @Override + public StringDistance toLucene() { + return DirectSpellChecker.INTERNAL_LEVENSHTEIN; + } + }, + /** String distance algorithm based on Damerau-Levenshtein algorithm. */ + DAMERAU_LEVENSHTEIN { + @Override + public StringDistance toLucene() { + return new LuceneLevenshteinDistance(); + } + }, + /** String distance algorithm based on the Levenshtein edit distance algorithm. */ + LEVENSTEIN { + @Override + public StringDistance toLucene() { + return new LevensteinDistance(); + } + }, + /** String distance algorithm based on Jaro-Winkler algorithm. */ + JAROWINKLER { + @Override + public StringDistance toLucene() { + return new JaroWinklerDistance(); + } + }, + /** String distance algorithm based on character n-grams. 
*/ + NGRAM { + @Override + public StringDistance toLucene() { + return new NGramDistance(); + } + }; + + protected static StringDistanceImpl PROTOTYPE = INTERNAL; + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeVInt(ordinal()); + } + + @Override + public StringDistanceImpl readFrom(final StreamInput in) throws IOException { + int ordinal = in.readVInt(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IOException("Unknown StringDistanceImpl ordinal [" + ordinal + "]"); + } + return values()[ordinal]; + } + + public static StringDistanceImpl resolve(final String str) { + Objects.requireNonNull(str, "Input string is null"); + final String distanceVal = str.toLowerCase(Locale.ROOT); + switch (distanceVal) { + case "internal": + return INTERNAL; + case "damerau_levenshtein": + case "dameraulevenshtein": + return DAMERAU_LEVENSHTEIN; + case "levenstein": + return LEVENSTEIN; + case "ngram": + return NGRAM; + case "jarowinkler": + return JAROWINKLER; + default: throw new IllegalArgumentException("Illegal distance option " + str); + } + } + + public abstract StringDistance toLucene(); + } + }
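Taken together, the rewritten builder is strongly typed and validates eagerly on the client. A minimal usage sketch (the field name and values are illustrative, not part of this change):

    TermSuggestionBuilder suggestion = new TermSuggestionBuilder("body")  // "body" is a hypothetical field
        .suggestMode(TermSuggestionBuilder.SuggestMode.POPULAR)
        .sort(SortBy.FREQUENCY)
        .stringDistance(TermSuggestionBuilder.StringDistanceImpl.NGRAM)
        .maxEdits(2)
        .accuracy(0.7f);
    // Bad values now fail on the client instead of at shard level:
    // suggestion.maxEdits(3); -> IllegalArgumentException("maxEdits must be between 1 and 2")
    // Parsing is symmetric: the REST strings resolve to the same enums, e.g.
    TermSuggestionBuilder.SuggestMode mode = TermSuggestionBuilder.SuggestMode.resolve("popular");           // -> POPULAR
    TermSuggestionBuilder.StringDistanceImpl dist = TermSuggestionBuilder.StringDistanceImpl.resolve("ngram"); // -> NGRAM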
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionContext.java b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionContext.java index 4ff32d797ef..5102ef99014 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionContext.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionContext.java @@ -18,20 +18,25 @@ */ package org.elasticsearch.search.suggest.term; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.suggest.DirectSpellcheckerSettings; -import org.elasticsearch.search.suggest.Suggester; import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; final class TermSuggestionContext extends SuggestionContext { private final DirectSpellcheckerSettings settings = new DirectSpellcheckerSettings(); - public TermSuggestionContext(Suggester suggester) { - super(suggester); + public TermSuggestionContext(QueryShardContext shardContext) { + super(TermSuggester.PROTOTYPE, shardContext); } public DirectSpellcheckerSettings getDirectSpellCheckerSettings() { return settings; } -} \ No newline at end of file + @Override + public String toString() { + return "SpellcheckerSettings" + settings + ", BaseSettings" + super.toString(); + } + +} diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index c6f189ea8a2..be2d6ccfea1 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -26,7 +26,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -49,6 +48,7 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -774,6 +774,33 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis return false; } + /** + * Check if any of the indices to be closed are currently being restored from a snapshot, and fail the close operation if such an + * index is found, since closing an index that is being restored makes the index unusable (it cannot be recovered). + */ + public static void checkIndexClosing(ClusterState currentState, Set indices) { + RestoreInProgress restore = currentState.custom(RestoreInProgress.TYPE); + if (restore != null) { + Set indicesToFail = null; + for (RestoreInProgress.Entry entry : restore.entries()) { + for (ObjectObjectCursor shard : entry.shards()) { + if (!shard.value.state().completed()) { + IndexMetaData indexMetaData = currentState.metaData().index(shard.key.getIndex()); + if (indexMetaData != null && indices.contains(indexMetaData)) { + if (indicesToFail == null) { + indicesToFail = new HashSet<>(); + } + indicesToFail.add(shard.key.getIndex()); + } + } + } + } + if (indicesToFail != null) { + throw new IllegalArgumentException("Cannot close indices that are being restored: " + indicesToFail); + } + } + } + /** * Adds restore completion listener *

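Both close-time guards introduced in this change (the restore check above and the snapshot check added below) are meant to run inside the cluster-state update that closes indices. A sketch of how a caller might compose them, assuming Set<IndexMetaData> as the element type implied by the contains() calls:

    static void validateIndicesForClose(ClusterState currentState, Set<IndexMetaData> indicesToClose) {
        // Throws IllegalArgumentException if any index is still being restored from a snapshot...
        RestoreService.checkIndexClosing(currentState, indicesToClose);
        // ...or belongs to a running, non-partial snapshot (see SnapshotsService below).
        SnapshotsService.checkIndexClosing(currentState, indicesToClose);
    }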
      diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 340a7f6ce83..4a15dbdac2e 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -23,13 +23,13 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.lucene.index.IndexCommit; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -343,9 +343,9 @@ public class SnapshotShardsService extends AbstractLifecycleComponent indices = Arrays.asList(indexNameExpressionResolver.concreteIndices(currentState, request.indicesOptions(), request.indices())); + List indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request.indicesOptions(), request.indices())); logger.trace("[{}][{}] creating snapshot for indices [{}]", request.repository(), request.name(), indices); - newSnapshot = new SnapshotsInProgress.Entry(snapshotId, request.includeGlobalState(), State.INIT, indices, System.currentTimeMillis(), null); + newSnapshot = new SnapshotsInProgress.Entry(snapshotId, request.includeGlobalState(), request.partial(), State.INIT, indices, System.currentTimeMillis(), null); snapshots = new SnapshotsInProgress(newSnapshot); } else { // TODO: What should we do if a snapshot is already running? @@ -228,7 +229,7 @@ public class SnapshotsService extends AbstractLifecycleComponent closed = new HashSet<>(); for (ObjectObjectCursor entry : shards) { if (entry.value.state() == State.MISSING) { - if (metaData.hasIndex(entry.key.getIndex().getName()) && metaData.index(entry.key.getIndex()).getState() == IndexMetaData.State.CLOSE) { + if (metaData.hasIndex(entry.key.getIndex().getName()) && metaData.getIndexSafe(entry.key.getIndex()).getState() == IndexMetaData.State.CLOSE) { closed.add(entry.key.getIndex().getName()); } else { missing.add(entry.key.getIndex().getName()); @@ -841,7 +842,7 @@ public class SnapshotsService extends AbstractLifecycleComponent indices) { + Set indicesToFail = indicesToFailForCloseOrDeletion(currentState, indices); + if (indicesToFail != null) { + throw new IllegalArgumentException("Cannot delete indices that are being snapshotted: " + indicesToFail + + ". Try again after snapshot finishes or cancel the currently running snapshot."); + } + } + + /** + * Check if any of the indices to be closed are currently being snapshotted. Fail as closing an index that is being + * snapshotted (with partial == false) makes the snapshot fail. 
+ */ + public static void checkIndexClosing(ClusterState currentState, Set indices) { + Set indicesToFail = indicesToFailForCloseOrDeletion(currentState, indices); + if (indicesToFail != null) { + throw new IllegalArgumentException("Cannot close indices that are being snapshotted: " + indicesToFail + + ". Try again after snapshot finishes or cancel the currently running snapshot."); + } + } + + private static Set indicesToFailForCloseOrDeletion(ClusterState currentState, Set indices) { + SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); + Set indicesToFail = null; + if (snapshots != null) { + for (final SnapshotsInProgress.Entry entry : snapshots.entries()) { + if (entry.partial() == false) { + if (entry.state() == State.INIT) { + for (String index : entry.indices()) { + IndexMetaData indexMetaData = currentState.metaData().index(index); + if (indexMetaData != null && indices.contains(indexMetaData)) { + if (indicesToFail == null) { + indicesToFail = new HashSet<>(); + } + indicesToFail.add(indexMetaData.getIndex()); + } + } + } else { + for (ObjectObjectCursor shard : entry.shards()) { + if (!shard.value.state().completed()) { + IndexMetaData indexMetaData = currentState.metaData().index(shard.key.getIndex()); + if (indexMetaData != null && indices.contains(indexMetaData)) { + if (indicesToFail == null) { + indicesToFail = new HashSet<>(); + } + indicesToFail.add(shard.key.getIndex()); + } + } + } + } + } + } + } + return indicesToFail; + } + /** * Adds snapshot completion listener * @@ -1302,6 +1362,15 @@ public class SnapshotsService extends AbstractLifecycleComponent THREADPOOL_GROUP_SETTING = Setting.groupSetting("threadpool.", true, Setting.Scope.CLUSTER); + public static final Setting THREADPOOL_GROUP_SETTING = + Setting.groupSetting("threadpool.", Property.Dynamic, Property.NodeScope); private volatile Map executors; @@ -222,13 +222,12 @@ public class ThreadPool extends AbstractComponent implements Closeable { int halfProcMaxAt5 = Math.min(((availableProcessors + 1) / 2), 5); int halfProcMaxAt10 = Math.min(((availableProcessors + 1) / 2), 10); Map defaultExecutorTypeSettings = new HashMap<>(); - add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.GENERIC).keepAlive("30s")); + add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.GENERIC).size(4 * availableProcessors).keepAlive("30s")); add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.INDEX).size(availableProcessors).queueSize(200)); add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.BULK).size(availableProcessors).queueSize(50)); add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.GET).size(availableProcessors).queueSize(1000)); add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.SEARCH).size(((availableProcessors * 3) / 2) + 1).queueSize(1000)); add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.SUGGEST).size(availableProcessors).queueSize(1000)); - add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.PERCOLATE).size(availableProcessors).queueSize(1000)); add(defaultExecutorTypeSettings, new ExecutorSettingsBuilder(Names.MANAGEMENT).size(5).keepAlive("5m")); // no queue as this means clients will need to handle rejections on listener queue even if the operation succeeded // the assumption here is that the listeners should be very lightweight on the listeners side @@ -339,10 +338,18 @@ public class ThreadPool extends AbstractComponent implements Closeable { return new 
ThreadPoolStats(stats); } + /** + * Get the generic executor. This executor's {@link Executor#execute(Runnable)} method will run the Runnable it is given in + * the {@link ThreadContext} of the thread that queues it. + */ public Executor generic() { return executor(Names.GENERIC); } + /** + * Get the executor with the given name. This executor's {@link Executor#execute(Runnable)} method will run the Runnable it is given in + * the {@link ThreadContext} of the thread that queues it. + */ public Executor executor(String name) { Executor executor = executors.get(name).executor(); if (executor == null) { @@ -355,10 +362,31 @@ public class ThreadPool extends AbstractComponent implements Closeable { return this.scheduler; } + /** + * Schedules a periodic action that always runs on the scheduler thread. + * + * @param command the action to take + * @param interval the delay interval + * @return a ScheduledFuture whose get will return when the task is complete and throw an exception if it is canceled + */ public ScheduledFuture scheduleWithFixedDelay(Runnable command, TimeValue interval) { return scheduler.scheduleWithFixedDelay(new LoggingRunnable(command), interval.millis(), interval.millis(), TimeUnit.MILLISECONDS); } + /** + * Schedules a one-shot command to run after a given delay. The command is not run in the context of the calling thread. To preserve the + * context of the calling thread you may call threadPool.getThreadContext().preserveContext on the runnable before passing + * it to this method. + * + * @param delay delay before the task executes + * @param name the name of the thread pool on which to execute this task. SAME means "execute on the scheduler thread" which changes the + * meaning of the ScheduledFuture returned by this method. In that case the ScheduledFuture will complete only when the command + * completes. + * @param command the command to run + * @return a ScheduledFuture whose get will return when the task has been added to its target thread pool and throw an exception if + * the task is canceled before it was added to its target thread pool. Once the task has been added to its target thread pool + * the ScheduledFuture cannot interact with it. + */ public ScheduledFuture schedule(TimeValue delay, String name, Runnable command) { if (!Names.SAME.equals(name)) { command = new ThreadedRunnable(command, executor(name));
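A short sketch of the context-propagation caveat this javadoc describes (threadPool and task are placeholders; preserveContext is the ThreadContext helper the javadoc refers to):

    // Runs task on the generic pool after 30s; task does NOT see the caller's ThreadContext:
    threadPool.schedule(TimeValue.timeValueSeconds(30), ThreadPool.Names.GENERIC, task);
    // To carry the caller's context (e.g. request headers) into the task, wrap it explicitly:
    threadPool.schedule(TimeValue.timeValueSeconds(30), ThreadPool.Names.GENERIC,
            threadPool.getThreadContext().preserveContext(task));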
diff --git a/core/src/main/java/org/elasticsearch/transport/Transport.java b/core/src/main/java/org/elasticsearch/transport/Transport.java index c930773f39c..532c9d99ace 100644 --- a/core/src/main/java/org/elasticsearch/transport/Transport.java +++ b/core/src/main/java/org/elasticsearch/transport/Transport.java @@ -22,6 +22,7 @@ package org.elasticsearch.transport; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -35,7 +36,7 @@ import java.util.Map; public interface Transport extends LifecycleComponent { - Setting TRANSPORT_TCP_COMPRESS = Setting.boolSetting("transport.tcp.compress", false, false, Setting.Scope.CLUSTER); + Setting TRANSPORT_TCP_COMPRESS = Setting.boolSetting("transport.tcp.compress", false, Property.NodeScope); void transportServiceAdapter(TransportServiceAdapter service); diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index 7884ed04af1..2d804bfc786 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -20,20 +20,18 @@ package org.elasticsearch.transport; import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; -import org.elasticsearch.action.support.replication.ReplicationTask; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -43,7 +41,6 @@ import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.ThreadPool; @@ -98,10 +95,11 @@ public class TransportService extends AbstractLifecycleComponent> TRACE_LOG_INCLUDE_SETTING = listSetting("transport.tracer.include", emptyList(), - Function.identity(), true, Scope.CLUSTER); - public static final Setting> TRACE_LOG_EXCLUDE_SETTING = listSetting("transport.tracer.exclude", - Arrays.asList("internal:discovery/zen/fd*", 
TransportLivenessAction.NAME), Function.identity(), true, Scope.CLUSTER); + public static final Setting> TRACE_LOG_INCLUDE_SETTING = + listSetting("transport.tracer.include", emptyList(), Function.identity(), Property.Dynamic, Property.NodeScope); + public static final Setting> TRACE_LOG_EXCLUDE_SETTING = + listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), + Function.identity(), Property.Dynamic, Property.NodeScope); private final ESLogger tracerLog; @@ -112,11 +110,11 @@ public class TransportService extends AbstractLifecycleComponent> HOST = listSetting("transport.host", emptyList(), s -> s, false, Scope.CLUSTER); - public static final Setting> PUBLISH_HOST = listSetting("transport.publish_host", HOST, s -> s, false, Scope.CLUSTER); - public static final Setting> BIND_HOST = listSetting("transport.bind_host", HOST, s -> s, false, Scope.CLUSTER); - public static final Setting PORT = new Setting<>("transport.tcp.port", "9300-9400", s -> s, false, Scope.CLUSTER); - public static final Setting PUBLISH_PORT = intSetting("transport.publish_port", -1, -1, false, Scope.CLUSTER); + public static final Setting> HOST = + listSetting("transport.host", emptyList(), Function.identity(), Property.NodeScope); + public static final Setting> PUBLISH_HOST = + listSetting("transport.publish_host", HOST, Function.identity(), Property.NodeScope); + public static final Setting> BIND_HOST = + listSetting("transport.bind_host", HOST, Function.identity(), Property.NodeScope); + public static final Setting PORT = + new Setting<>("transport.tcp.port", "9300-9400", Function.identity(), Property.NodeScope); + public static final Setting PUBLISH_PORT = + intSetting("transport.publish_port", -1, -1, Property.NodeScope); public static final String DEFAULT_PROFILE = "default"; - public static final Setting TRANSPORT_PROFILES_SETTING = groupSetting("transport.profiles.", true, Scope.CLUSTER); + public static final Setting TRANSPORT_PROFILES_SETTING = + groupSetting("transport.profiles.", Property.Dynamic, Property.NodeScope); private TransportSettings() { diff --git a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java b/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java index faef71998a9..a7783148ef3 100644 --- a/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java @@ -272,7 +272,7 @@ public class LocalTransport extends AbstractLifecycleComponent implem handleException(handler, new RemoteTransportException(nodeName(), localAddress, action, e)); } } else { - logger.warn("Failed to receive message for action [" + action + "]", e); + logger.warn("Failed to receive message for action [{}]", e, action); } } } @@ -314,7 +314,7 @@ public class LocalTransport extends AbstractLifecycleComponent implem try { transportChannel.sendResponse(e); } catch (Throwable e1) { - logger.warn("Failed to send error message back to client for action [" + action + "]", e1); + logger.warn("Failed to send error message back to client for action [{}]", e1, action); logger.warn("Actual Exception", e); } } @@ -325,7 +325,7 @@ public class LocalTransport extends AbstractLifecycleComponent implem try { transportChannel.sendResponse(e); } catch (Throwable e1) { - logger.warn("Failed to send error message back to client for action [" + action + "]", e); + logger.warn("Failed to send error message back to client for action [{}]", e, action); 
logger.warn("Actual Exception", e1); } diff --git a/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java b/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java index e4dbbfa73af..9eef4401144 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java @@ -127,6 +127,10 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { } streamIn = compressor.streamInput(streamIn); } + if (version.onOrAfter(Version.CURRENT.minimumCompatibilityVersion()) == false || version.major != Version.CURRENT.major) { + throw new IllegalStateException("Received message from unsupported version: [" + version + + "] minimal compatible version is: [" +Version.CURRENT.minimumCompatibilityVersion() + "]"); + } streamIn.setVersion(version); if (TransportStatus.isRequest(status)) { threadContext.readHeaders(streamIn); @@ -274,7 +278,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { try { transportChannel.sendResponse(e); } catch (IOException e1) { - logger.warn("Failed to send error message back to client for action [" + action + "]", e); + logger.warn("Failed to send error message back to client for action [{}]", e, action); logger.warn("Actual Exception", e1); } } @@ -336,7 +340,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { try { transportChannel.sendResponse(e); } catch (Throwable e1) { - logger.warn("Failed to send error message back to client for action [" + reg.getAction() + "]", e1); + logger.warn("Failed to send error message back to client for action [{}]", e1, reg.getAction()); logger.warn("Actual Exception", e); } } diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java index ed92aa261db..2a1fc3226a4 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyInternalESLogger.java @@ -19,12 +19,14 @@ package org.elasticsearch.transport.netty; +import org.elasticsearch.common.SuppressLoggerChecks; import org.elasticsearch.common.logging.ESLogger; import org.jboss.netty.logging.AbstractInternalLogger; /** * */ +@SuppressLoggerChecks(reason = "safely delegates to logger") public class NettyInternalESLogger extends AbstractInternalLogger { private final ESLogger logger; diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index dc9dd70ab8d..da629a0d47f 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.math.MathUtils; import org.elasticsearch.common.metrics.CounterMetric; @@ -44,7 +45,7 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkService.TcpSettings; import 
org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; @@ -149,40 +150,45 @@ public class NettyTransport extends AbstractLifecycleComponent implem public static final String TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX = "transport_client_worker"; public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss"; - public static final Setting WORKER_COUNT = new Setting<>("transport.netty.worker_count", + public static final Setting WORKER_COUNT = + new Setting<>("transport.netty.worker_count", (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), - (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), false, Setting.Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_RECOVERY = intSetting("transport.connections_per_node.recovery", 2, 1, false, - Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_BULK = intSetting("transport.connections_per_node.bulk", 3, 1, false, - Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_REG = intSetting("transport.connections_per_node.reg", 6, 1, false, - Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_STATE = intSetting("transport.connections_per_node.state", 1, 1, false, - Scope.CLUSTER); - public static final Setting CONNECTIONS_PER_NODE_PING = intSetting("transport.connections_per_node.ping", 1, 1, false, - Scope.CLUSTER); + (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_RECOVERY = + intSetting("transport.connections_per_node.recovery", 2, 1, Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_BULK = + intSetting("transport.connections_per_node.bulk", 3, 1, Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_REG = + intSetting("transport.connections_per_node.reg", 6, 1, Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_STATE = + intSetting("transport.connections_per_node.state", 1, 1, Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_PING = + intSetting("transport.connections_per_node.ping", 1, 1, Property.NodeScope); // the scheduled internal ping interval setting, defaults to disabled (-1) - public static final Setting PING_SCHEDULE = timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), false, - Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING_CLIENT = boolSetting("transport.tcp.blocking_client", TcpSettings.TCP_BLOCKING_CLIENT, - false, Setting.Scope.CLUSTER); - public static final Setting TCP_CONNECT_TIMEOUT = timeSetting("transport.tcp.connect_timeout", - TcpSettings.TCP_CONNECT_TIMEOUT, false, Setting.Scope.CLUSTER); - public static final Setting TCP_NO_DELAY = boolSetting("transport.tcp_no_delay", TcpSettings.TCP_NO_DELAY, false, - Setting.Scope.CLUSTER); - public static final Setting TCP_KEEP_ALIVE = boolSetting("transport.tcp.keep_alive", TcpSettings.TCP_KEEP_ALIVE, false, - Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING_SERVER = boolSetting("transport.tcp.blocking_server", TcpSettings.TCP_BLOCKING_SERVER, - false, Setting.Scope.CLUSTER); - public static final Setting 
TCP_REUSE_ADDRESS = boolSetting("transport.tcp.reuse_address", TcpSettings.TCP_REUSE_ADDRESS, - false, Setting.Scope.CLUSTER); + public static final Setting PING_SCHEDULE = + timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), Property.NodeScope); + public static final Setting TCP_BLOCKING_CLIENT = + boolSetting("transport.tcp.blocking_client", TcpSettings.TCP_BLOCKING_CLIENT, Property.NodeScope); + public static final Setting TCP_CONNECT_TIMEOUT = + timeSetting("transport.tcp.connect_timeout", TcpSettings.TCP_CONNECT_TIMEOUT, Property.NodeScope); + public static final Setting TCP_NO_DELAY = + boolSetting("transport.tcp_no_delay", TcpSettings.TCP_NO_DELAY, Property.NodeScope); + public static final Setting TCP_KEEP_ALIVE = + boolSetting("transport.tcp.keep_alive", TcpSettings.TCP_KEEP_ALIVE, Property.NodeScope); + public static final Setting TCP_BLOCKING_SERVER = + boolSetting("transport.tcp.blocking_server", TcpSettings.TCP_BLOCKING_SERVER, Property.NodeScope); + public static final Setting TCP_REUSE_ADDRESS = + boolSetting("transport.tcp.reuse_address", TcpSettings.TCP_REUSE_ADDRESS, Property.NodeScope); - public static final Setting TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, Setting.Scope.CLUSTER); + public static final Setting TCP_SEND_BUFFER_SIZE = + Setting.byteSizeSetting("transport.tcp.send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE, Property.NodeScope); + public static final Setting TCP_RECEIVE_BUFFER_SIZE = + Setting.byteSizeSetting("transport.tcp.receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE, Property.NodeScope); - public static final Setting NETTY_MAX_CUMULATION_BUFFER_CAPACITY = Setting.byteSizeSetting("transport.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); - public static final Setting NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = Setting.intSetting("transport.netty.max_composite_buffer_components", -1, -1, false, Setting.Scope.CLUSTER); + public static final Setting NETTY_MAX_CUMULATION_BUFFER_CAPACITY = + Setting.byteSizeSetting("transport.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), Property.NodeScope); + public static final Setting NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = + Setting.intSetting("transport.netty.max_composite_buffer_components", -1, -1, Property.NodeScope); // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one public static final Setting NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( @@ -195,12 +201,13 @@ public class NettyTransport extends AbstractLifecycleComponent implem defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024)); } return new ByteSizeValue(defaultReceiverPredictor).toString(); - }, false, Setting.Scope.CLUSTER); - public static final Setting NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting("transport.netty.receive_predictor_min", - NETTY_RECEIVE_PREDICTOR_SIZE, false, Scope.CLUSTER); - public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting("transport.netty.receive_predictor_max", - NETTY_RECEIVE_PREDICTOR_SIZE, false, Scope.CLUSTER); - public static final Setting NETTY_BOSS_COUNT = intSetting("transport.netty.boss_count", 1, 1, false, Scope.CLUSTER); + }, 
Property.NodeScope); + public static final Setting NETTY_RECEIVE_PREDICTOR_MIN = + byteSizeSetting("transport.netty.receive_predictor_min", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); + public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = + byteSizeSetting("transport.netty.receive_predictor_max", NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); + public static final Setting NETTY_BOSS_COUNT = + intSetting("transport.netty.boss_count", 1, 1, Property.NodeScope); protected final NetworkService networkService; protected final Version version; @@ -943,8 +950,8 @@ public class NettyTransport extends AbstractLifecycleComponent implem } globalLock.readLock().lock(); try { - connectionLock.acquire(node.id()); - try { + + try (Releasable ignored = connectionLock.acquire(node.id())) { if (!lifecycle.started()) { throw new IllegalStateException("can't add nodes to a stopped transport"); } @@ -979,8 +986,6 @@ public class NettyTransport extends AbstractLifecycleComponent implem } catch (Exception e) { throw new ConnectTransportException(node, "general node connection failure", e); } - } finally { - connectionLock.release(node.id()); } } finally { globalLock.readLock().unlock(); @@ -1103,8 +1108,8 @@ public class NettyTransport extends AbstractLifecycleComponent implem @Override public void disconnectFromNode(DiscoveryNode node) { - connectionLock.acquire(node.id()); - try { + + try (Releasable ignored = connectionLock.acquire(node.id())) { NodeChannels nodeChannels = connectedNodes.remove(node); if (nodeChannels != null) { try { @@ -1115,8 +1120,6 @@ public class NettyTransport extends AbstractLifecycleComponent implem transportServiceAdapter.raiseNodeDisconnected(node); } } - } finally { - connectionLock.release(node.id()); } } @@ -1128,8 +1131,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem // check outside of the lock NodeChannels nodeChannels = connectedNodes.get(node); if (nodeChannels != null && nodeChannels.hasChannel(channel)) { - connectionLock.acquire(node.id()); - try { + try (Releasable ignored = connectionLock.acquire(node.id())) { nodeChannels = connectedNodes.get(node); // check again within the connection lock, if its still applicable to remove it if (nodeChannels != null && nodeChannels.hasChannel(channel)) { @@ -1143,8 +1145,6 @@ public class NettyTransport extends AbstractLifecycleComponent implem } return true; } - } finally { - connectionLock.release(node.id()); } } return false; @@ -1385,9 +1385,9 @@ public class NettyTransport extends AbstractLifecycleComponent implem @Override public void onFailure(Throwable t) { if (lifecycle.stoppedOrClosed()) { - logger.trace("[{}] failed to send ping transport message", t); + logger.trace("failed to send ping transport message", t); } else { - logger.warn("[{}] failed to send ping transport message", t); + logger.warn("failed to send ping transport message", t); } } } diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index 6f30a4931ab..2bac280351e 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import 
org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateTaskConfig; @@ -37,12 +36,16 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.set.Sets; @@ -52,7 +55,9 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.node.Node; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.transport.TransportSettings; +import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; @@ -121,7 +126,7 @@ public class TribeService extends AbstractLifecycleComponent { } // internal settings only - public static final Setting TRIBE_NAME_SETTING = Setting.simpleString("tribe.name", false, Setting.Scope.CLUSTER); + public static final Setting TRIBE_NAME_SETTING = Setting.simpleString("tribe.name", Property.NodeScope); private final ClusterService clusterService; private final String[] blockIndicesWrite; private final String[] blockIndicesRead; @@ -140,22 +145,31 @@ public class TribeService extends AbstractLifecycleComponent { throw new IllegalArgumentException( "Invalid value for [tribe.on_conflict] must be either [any, drop or start with prefer_] but was: [" + s + "]"); } - }, false, Setting.Scope.CLUSTER); + }, Property.NodeScope); - public static final Setting BLOCKS_METADATA_SETTING = Setting.boolSetting("tribe.blocks.metadata", false, false, - Setting.Scope.CLUSTER); - public static final Setting BLOCKS_WRITE_SETTING = Setting.boolSetting("tribe.blocks.write", false, false, - Setting.Scope.CLUSTER); - public static final Setting> BLOCKS_WRITE_INDICES_SETTING = Setting.listSetting("tribe.blocks.write.indices", - Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting> BLOCKS_READ_INDICES_SETTING = Setting.listSetting("tribe.blocks.read.indices", - Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting> BLOCKS_METADATA_INDICES_SETTING = Setting.listSetting("tribe.blocks.metadata.indices", - Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting BLOCKS_METADATA_SETTING = + Setting.boolSetting("tribe.blocks.metadata", false, Property.NodeScope); + public static final Setting BLOCKS_WRITE_SETTING = + Setting.boolSetting("tribe.blocks.write", false, Property.NodeScope); + public static final Setting> BLOCKS_WRITE_INDICES_SETTING = + Setting.listSetting("tribe.blocks.write.indices", Collections.emptyList(), Function.identity(), Property.NodeScope); + public static final Setting> BLOCKS_READ_INDICES_SETTING = + 
Setting.listSetting("tribe.blocks.read.indices", Collections.emptyList(), Function.identity(), Property.NodeScope); + public static final Setting> BLOCKS_METADATA_INDICES_SETTING = + Setting.listSetting("tribe.blocks.metadata.indices", Collections.emptyList(), Function.identity(), Property.NodeScope); public static final Set TRIBE_SETTING_KEYS = Sets.newHashSet(TRIBE_NAME_SETTING.getKey(), ON_CONFLICT_SETTING.getKey(), BLOCKS_METADATA_INDICES_SETTING.getKey(), BLOCKS_METADATA_SETTING.getKey(), BLOCKS_READ_INDICES_SETTING.getKey(), BLOCKS_WRITE_INDICES_SETTING.getKey(), BLOCKS_WRITE_SETTING.getKey()); + // these settings should be passed through to each tribe client, if they are not set explicitly + private static final List> PASS_THROUGH_SETTINGS = Arrays.asList( + NetworkService.GLOBAL_NETWORK_HOST_SETTING, + NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING, + NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING, + TransportSettings.HOST, + TransportSettings.BIND_HOST, + TransportSettings.PUBLISH_HOST + ); private final String onConflict; private final Set droppedIndices = ConcurrentCollections.newConcurrentSet(); @@ -169,20 +183,8 @@ public class TribeService extends AbstractLifecycleComponent { nodesSettings.remove("blocks"); // remove prefix settings that don't indicate a client nodesSettings.remove("on_conflict"); // remove prefix settings that don't indicate a client for (Map.Entry entry : nodesSettings.entrySet()) { - Settings.Builder sb = Settings.builder().put(entry.getValue()); - sb.put("node.name", settings.get("node.name") + "/" + entry.getKey()); - sb.put(Environment.PATH_HOME_SETTING.getKey(), Environment.PATH_HOME_SETTING.get(settings)); // pass through ES home dir - if (Environment.PATH_CONF_SETTING.exists(settings)) { - sb.put(Environment.PATH_CONF_SETTING.getKey(), Environment.PATH_CONF_SETTING.get(settings)); - } - sb.put(TRIBE_NAME_SETTING.getKey(), entry.getKey()); - if (sb.get("http.enabled") == null) { - sb.put("http.enabled", false); - } - sb.put(Node.NODE_DATA_SETTING.getKey(), false); - sb.put(Node.NODE_MASTER_SETTING.getKey(), false); - sb.put(Node.NODE_INGEST_SETTING.getKey(), false); - nodes.add(new TribeClientNode(sb.build())); + Settings clientSettings = buildClientSettings(entry.getKey(), settings, entry.getValue()); + nodes.add(new TribeClientNode(clientSettings)); } this.blockIndicesMetadata = BLOCKS_METADATA_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY); @@ -201,6 +203,48 @@ public class TribeService extends AbstractLifecycleComponent { this.onConflict = ON_CONFLICT_SETTING.get(settings); } + // pkg private for testing + /** + * Builds node settings for a tribe client node from the tribe node's global settings, + * combined with tribe specific settings. 
+ */ + static Settings buildClientSettings(String tribeName, Settings globalSettings, Settings tribeSettings) { + for (String tribeKey : tribeSettings.getAsMap().keySet()) { + if (tribeKey.startsWith("path.")) { + throw new IllegalArgumentException("Setting [" + tribeKey + "] not allowed in tribe client [" + tribeName + "]"); + } + } + Settings.Builder sb = Settings.builder().put(tribeSettings); + sb.put("node.name", globalSettings.get("node.name") + "/" + tribeName); + sb.put(Environment.PATH_HOME_SETTING.getKey(), Environment.PATH_HOME_SETTING.get(globalSettings)); // pass through ES home dir + if (Environment.PATH_CONF_SETTING.exists(globalSettings)) { + sb.put(Environment.PATH_CONF_SETTING.getKey(), Environment.PATH_CONF_SETTING.get(globalSettings)); + } + if (Environment.PATH_PLUGINS_SETTING.exists(globalSettings)) { + sb.put(Environment.PATH_PLUGINS_SETTING.getKey(), Environment.PATH_PLUGINS_SETTING.get(globalSettings)); + } + if (Environment.PATH_LOGS_SETTING.exists(globalSettings)) { + sb.put(Environment.PATH_LOGS_SETTING.getKey(), Environment.PATH_LOGS_SETTING.get(globalSettings)); + } + if (Environment.PATH_SCRIPTS_SETTING.exists(globalSettings)) { + sb.put(Environment.PATH_SCRIPTS_SETTING.getKey(), Environment.PATH_SCRIPTS_SETTING.get(globalSettings)); + } + for (Setting<?> passthrough : PASS_THROUGH_SETTINGS) { + if (passthrough.exists(tribeSettings) == false && passthrough.exists(globalSettings)) { + sb.put(passthrough.getKey(), globalSettings.get(passthrough.getKey())); + } + } + sb.put(TRIBE_NAME_SETTING.getKey(), tribeName); + if (sb.get(NetworkModule.HTTP_ENABLED.getKey()) == null) { + sb.put(NetworkModule.HTTP_ENABLED.getKey(), false); + } + sb.put(Node.NODE_DATA_SETTING.getKey(), false); + sb.put(Node.NODE_MASTER_SETTING.getKey(), false); + sb.put(Node.NODE_INGEST_SETTING.getKey(), false); + return sb.build(); + } + + @Override protected void doStart() { if (nodes.isEmpty() == false) { @@ -222,7 +266,7 @@ try { otherNode.close(); } catch (Throwable t) { - logger.warn("failed to close node {} on failed start", otherNode, t); + logger.warn("failed to close node {} on failed start", t, otherNode); } } if (e instanceof RuntimeException) { @@ -364,9 +408,11 @@ if (table == null) { continue; } - final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex()); + //NOTE: we have to use the index name here since UUIDs are different even if the name is the same + final String indexName = tribeIndex.getIndex().getName(); + final IndexMetaData indexMetaData = currentState.metaData().index(indexName); if (indexMetaData == null) { - if (!droppedIndices.contains(tribeIndex.getIndex().getName())) { + if (!droppedIndices.contains(indexName)) { // a new index, add it, and add the tribe name as a setting clusterStateChanged = true; logger.info("[{}] adding index {}", tribeName, tribeIndex.getIndex()); @@ -384,7 +430,7 @@ logger.info("[{}] dropping index {} due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); removeIndex(blocks, metaData, routingTable, tribeIndex); - droppedIndices.add(tribeIndex.getIndex().getName()); + droppedIndices.add(indexName); } else if (onConflict.startsWith(ON_CONFLICT_PREFER)) { // on conflict, prefer a tribe...
String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length()); diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch-start.help b/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch-start.help deleted file mode 100644 index 9b27a8dd390..00000000000 --- a/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch-start.help +++ /dev/null @@ -1,28 +0,0 @@ -NAME - - start - Start Elasticsearch - -SYNOPSIS - - elasticsearch start - -DESCRIPTION - - This command starts Elasticsearch. You can configure it to run in the foreground, write a pid file - and configure arbitrary options that override file-based configuration. - -OPTIONS - - -h,--help Shows this message - - -p,--pidfile Creates a pid file in the specified path on start - - -d,--daemonize Starts Elasticsearch in the background - - -Dproperty=value Configures an Elasticsearch specific property, like -Dnetwork.host=127.0.0.1 - - --property=value Configures an elasticsearch specific property, like --network.host 127.0.0.1 - --property value - - NOTE: The -d, -p, and -D arguments must appear before any --property arguments. - diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch-version.help b/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch-version.help deleted file mode 100644 index 00f2a33401c..00000000000 --- a/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch-version.help +++ /dev/null @@ -1,16 +0,0 @@ -NAME - - version - Show version information and exit - -SYNOPSIS - - elasticsearch version - -DESCRIPTION - - This command shows Elasticsearch version, timestamp and build information as well as JVM info - -OPTIONS - - -h,--help Shows this message - diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch.help b/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch.help deleted file mode 100644 index 83ee497dc21..00000000000 --- a/core/src/main/resources/org/elasticsearch/bootstrap/elasticsearch.help +++ /dev/null @@ -1,22 +0,0 @@ -NAME - - elasticsearch - Manages elasticsearch - -SYNOPSIS - - elasticsearch - -DESCRIPTION - - Start an elasticsearch node - -COMMANDS - - start Start elasticsearch - - version Show version information and exit - -NOTES - - [*] For usage help on specific commands please type "elasticsearch -h" - diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy index 608b33db0fe..4845011ad05 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -31,9 +31,12 @@ grant codeBase "${codebase.securesm-1.0.jar}" { //// Very special jar permissions: //// These are dangerous permissions that we don't want to grant to everything. 
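The buildClientSettings helper introduced above has a small, testable contract: suffix the node name with the tribe name, always copy the path.* settings from the tribe node, copy network/transport host settings only when the tribe config leaves them unset, and force the client role flags. A minimal sketch of that behavior, with invented setting values and a hypothetical demo class (buildClientSettings is package-private, so this would have to live in org.elasticsearch.tribe):

```java
package org.elasticsearch.tribe;

import org.elasticsearch.common.settings.Settings;

// Sketch only: invented values illustrating what buildClientSettings("t1", ...)
// is expected to return, based on the hunk above.
class BuildClientSettingsSketch {
    static void demo() {
        Settings global = Settings.builder()
                .put("node.name", "tribe_node")
                .put("path.home", "/tmp/es")
                .put("transport.bind_host", "192.168.0.1") // pass-through candidate
                .build();
        Settings tribe = Settings.builder()
                .put("cluster.name", "remote_cluster_one")
                .build();

        Settings client = TribeService.buildClientSettings("t1", global, tribe);

        assert "tribe_node/t1".equals(client.get("node.name"));          // tribe name suffixed
        assert "/tmp/es".equals(client.get("path.home"));                // home dir always copied
        assert "192.168.0.1".equals(client.get("transport.bind_host"));  // copied only because the tribe left it unset
        assert "t1".equals(client.get("tribe.name"));
        assert "false".equals(client.get("http.enabled"));               // defaults to off for tribe clients
        assert "false".equals(client.get("node.data"));                  // tribe clients are never data/master/ingest nodes
        // a "path.*" key inside the tribe settings would throw IllegalArgumentException
    }
}
```

Settings stores primitives as strings, which is why the boolean flags compare against "false" here.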
-grant codeBase "${codebase.lucene-core-5.5.0.jar}" { +grant codeBase "${codebase.lucene-core-6.0.0-snapshot-f0aa4fc.jar}" { // needed to allow MMapDirectory's "unmap hack" (die unmap hack, die) + // java 8 package permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; + // java 9 "package" + permission java.lang.RuntimePermission "accessClassInPackage.jdk.internal.ref"; permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // NOTE: also needed for RAMUsageEstimator size calculations permission java.lang.RuntimePermission "accessDeclaredMembers"; @@ -69,9 +72,6 @@ grant { // set by ESTestCase to improve test reproducibility // TODO: set this with gradle or some other way that repros with seed? permission java.util.PropertyPermission "es.processors.override", "write"; - // set by CLIToolTestCase - // TODO: do this differently? or test commandline tools differently? - permission java.util.PropertyPermission "es.default.path.home", "write"; // TODO: these simply trigger a noisy warning if its unable to clear the properties // fix that in randomizedtesting diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index 856cd50e2a9..8d56bc44b9a 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -31,7 +31,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; -grant codeBase "${codebase.lucene-test-framework-5.5.0.jar}" { +grant codeBase "${codebase.lucene-test-framework-6.0.0-snapshot-f0aa4fc.jar}" { // needed by RamUsageTester permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help deleted file mode 100644 index ba39e1ab8fb..00000000000 --- a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help +++ /dev/null @@ -1,59 +0,0 @@ -NAME - - install - Install a plugin - -SYNOPSIS - - plugin install - -DESCRIPTION - - This command installs an elasticsearch plugin. 
It can be used as follows: - - Officially supported or commercial plugins require just the plugin name: - - plugin install analysis-icu - plugin install x-pack - - Plugins from Maven Central require 'groupId:artifactId:version': - - plugin install org.elasticsearch:mapper-attachments:3.0.0 - - Plugins can be installed from a custom URL or file location as follows: - - plugin install http://some.domain.name//my-plugin-1.0.0.zip - plugin install file:/path/to/my-plugin-1.0.0.zip - -OFFICIAL PLUGINS - - The following plugins are officially supported and can be installed by just referring to their name - - - analysis-icu - - analysis-kuromoji - - analysis-phonetic - - analysis-smartcn - - analysis-stempel - - delete-by-query - - discovery-azure - - discovery-ec2 - - discovery-gce - - ingest-geoip - - lang-javascript - - lang-painless - - lang-python - - mapper-attachments - - mapper-murmur3 - - mapper-size - - repository-azure - - repository-hdfs - - repository-s3 - - store-smb - - -OPTIONS - - -v,--verbose Verbose output - - -h,--help Shows this message - - -b,--batch Enable batch mode explicitly, automatic confirmation of security permissions diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin-list.help b/core/src/main/resources/org/elasticsearch/plugins/plugin-list.help deleted file mode 100644 index c13949e8cb6..00000000000 --- a/core/src/main/resources/org/elasticsearch/plugins/plugin-list.help +++ /dev/null @@ -1,12 +0,0 @@ -NAME - - list - List all plugins - -SYNOPSIS - - plugin list - -DESCRIPTION - - This command lists all installed elasticsearch plugins - diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin-remove.help b/core/src/main/resources/org/elasticsearch/plugins/plugin-remove.help deleted file mode 100644 index b708adf1f69..00000000000 --- a/core/src/main/resources/org/elasticsearch/plugins/plugin-remove.help +++ /dev/null @@ -1,12 +0,0 @@ -NAME - - remove - Remove a plugin - -SYNOPSIS - - plugin remove - -DESCRIPTION - - This command removes an elasticsearch plugin - diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin.help b/core/src/main/resources/org/elasticsearch/plugins/plugin.help deleted file mode 100644 index 5cba544627a..00000000000 --- a/core/src/main/resources/org/elasticsearch/plugins/plugin.help +++ /dev/null @@ -1,24 +0,0 @@ -NAME - - plugin - Manages plugins - -SYNOPSIS - - plugin - -DESCRIPTION - - Manage plugins - -COMMANDS - - install Install a plugin - - remove Remove a plugin - - list List installed plugins - -NOTES - - [*] For usage help on specific commands please type "plugin -h" - diff --git a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java index 39b4df44059..94806422c17 100644 --- a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java +++ b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java @@ -82,7 +82,7 @@ public class BlendedTermQueryTests extends ESTestCase { w.addDocument(d); } w.commit(); - DirectoryReader reader = DirectoryReader.open(w, true); + DirectoryReader reader = DirectoryReader.open(w); IndexSearcher searcher = setSimilarity(newSearcher(reader)); { @@ -143,7 +143,7 @@ public class BlendedTermQueryTests extends ESTestCase { w.addDocument(d); } w.commit(); - DirectoryReader reader = DirectoryReader.open(w, true); + DirectoryReader reader = DirectoryReader.open(w); IndexSearcher searcher = setSimilarity(newSearcher(reader)); { String[] fields = new 
String[]{"username", "song"}; diff --git a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 606ff09a60f..955eb309436 100644 --- a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -63,7 +63,6 @@ import org.elasticsearch.indices.IndexTemplateAlreadyExistsException; import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.indices.recovery.RecoverFilesRecoveryException; -import org.elasticsearch.percolator.PercolateException; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException; @@ -382,19 +381,6 @@ public class ExceptionSerializationTests extends ESTestCase { assertEquals(id, ex.id()); } - public void testPercolateException() throws IOException { - ShardId id = new ShardId("foo", "_na_", 1); - PercolateException ex = serialize(new PercolateException(id, "percolate my ass", null)); - assertEquals(id, ex.getShardId()); - assertEquals("percolate my ass", ex.getMessage()); - assertNull(ex.getCause()); - - ex = serialize(new PercolateException(id, "percolate my ass", new NullPointerException())); - assertEquals(id, ex.getShardId()); - assertEquals("percolate my ass", ex.getMessage()); - assertTrue(ex.getCause() instanceof NullPointerException); - } - public void testRoutingValidationException() throws IOException { RoutingTableValidation validation = new RoutingTableValidation(); validation.addIndexFailure("foo", "bar"); @@ -746,7 +732,6 @@ public class ExceptionSerializationTests extends ESTestCase { ids.put(85, org.elasticsearch.index.AlreadyExpiredException.class); ids.put(86, org.elasticsearch.search.aggregations.AggregationExecutionException.class); ids.put(88, org.elasticsearch.indices.InvalidIndexTemplateException.class); - ids.put(89, org.elasticsearch.percolator.PercolateException.class); ids.put(90, org.elasticsearch.index.engine.RefreshFailedEngineException.class); ids.put(91, org.elasticsearch.search.aggregations.AggregationInitializationException.class); ids.put(92, org.elasticsearch.indices.recovery.DelayRecoveryException.class); diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index 4669f5bc718..17a41c30275 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -31,8 +31,8 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; -import static org.elasticsearch.Version.V_0_20_0; -import static org.elasticsearch.Version.V_0_90_0; +import static org.elasticsearch.Version.V_2_2_0; +import static org.elasticsearch.Version.V_5_0_0; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsString; @@ -42,21 +42,27 @@ import static org.hamcrest.Matchers.sameInstance; public class VersionTests extends ESTestCase { public void testVersionComparison() throws Exception { - assertThat(V_0_20_0.before(V_0_90_0), is(true)); - assertThat(V_0_20_0.before(V_0_20_0), is(false)); - assertThat(V_0_90_0.before(V_0_20_0), is(false)); + assertThat(V_2_2_0.before(V_5_0_0), is(true)); + 
assertThat(V_2_2_0.before(V_2_2_0), is(false)); + assertThat(V_5_0_0.before(V_2_2_0), is(false)); - assertThat(V_0_20_0.onOrBefore(V_0_90_0), is(true)); - assertThat(V_0_20_0.onOrBefore(V_0_20_0), is(true)); - assertThat(V_0_90_0.onOrBefore(V_0_20_0), is(false)); + assertThat(V_2_2_0.onOrBefore(V_5_0_0), is(true)); + assertThat(V_2_2_0.onOrBefore(V_2_2_0), is(true)); + assertThat(V_5_0_0.onOrBefore(V_2_2_0), is(false)); - assertThat(V_0_20_0.after(V_0_90_0), is(false)); - assertThat(V_0_20_0.after(V_0_20_0), is(false)); - assertThat(V_0_90_0.after(V_0_20_0), is(true)); + assertThat(V_2_2_0.after(V_5_0_0), is(false)); + assertThat(V_2_2_0.after(V_2_2_0), is(false)); + assertThat(V_5_0_0.after(V_2_2_0), is(true)); + + assertThat(V_2_2_0.onOrAfter(V_5_0_0), is(false)); + assertThat(V_2_2_0.onOrAfter(V_2_2_0), is(true)); + assertThat(V_5_0_0.onOrAfter(V_2_2_0), is(true)); + + assertTrue(Version.fromString("5.0.0-alpha2").onOrAfter(Version.fromString("5.0.0-alpha1"))); + assertTrue(Version.fromString("5.0.0").onOrAfter(Version.fromString("5.0.0-beta2"))); + assertTrue(Version.fromString("5.0.0-rc1").onOrAfter(Version.fromString("5.0.0-beta24"))); + assertTrue(Version.fromString("5.0.0-alpha24").before(Version.fromString("5.0.0-beta0"))); - assertThat(V_0_20_0.onOrAfter(V_0_90_0), is(false)); - assertThat(V_0_20_0.onOrAfter(V_0_20_0), is(true)); - assertThat(V_0_90_0.onOrAfter(V_0_20_0), is(true)); } public void testVersionConstantPresent() { @@ -127,31 +133,57 @@ public class VersionTests extends ESTestCase { public void testIndexCreatedVersion() { // an actual index has a IndexMetaData.SETTING_INDEX_UUID - final Version version = randomFrom(Version.V_0_18_0, Version.V_0_90_13, Version.V_1_3_0); + final Version version = randomFrom(Version.V_2_0_0, Version.V_2_3_0, Version.V_5_0_0); assertEquals(version, Version.indexCreated(Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "foo").put(IndexMetaData.SETTING_VERSION_CREATED, version).build())); } public void testMinCompatVersion() { assertThat(Version.V_2_0_0_beta1.minimumCompatibilityVersion(), equalTo(Version.V_2_0_0_beta1)); - assertThat(Version.V_1_3_0.minimumCompatibilityVersion(), equalTo(Version.V_1_0_0)); - assertThat(Version.V_1_2_0.minimumCompatibilityVersion(), equalTo(Version.V_1_0_0)); - assertThat(Version.V_1_2_3.minimumCompatibilityVersion(), equalTo(Version.V_1_0_0)); - assertThat(Version.V_1_0_0_RC2.minimumCompatibilityVersion(), equalTo(Version.V_1_0_0_RC2)); + assertThat(Version.V_2_1_0.minimumCompatibilityVersion(), equalTo(Version.V_2_0_0)); + assertThat(Version.V_2_2_0.minimumCompatibilityVersion(), equalTo(Version.V_2_0_0)); + assertThat(Version.V_2_3_0.minimumCompatibilityVersion(), equalTo(Version.V_2_0_0)); + assertThat(Version.V_5_0_0.minimumCompatibilityVersion(), equalTo(Version.V_5_0_0)); } public void testToString() { // with 2.0.beta we lowercase assertEquals("2.0.0-beta1", Version.V_2_0_0_beta1.toString()); - assertEquals("1.4.0.Beta1", Version.V_1_4_0_Beta1.toString()); - assertEquals("1.4.0", Version.V_1_4_0.toString()); + assertEquals("5.0.0", Version.V_5_0_0.toString()); + assertEquals("2.3.0", Version.V_2_3_0.toString()); + assertEquals("0.90.0.Beta1", Version.fromString("0.90.0.Beta1").toString()); + assertEquals("1.0.0.Beta1", Version.fromString("1.0.0.Beta1").toString()); + assertEquals("2.0.0-beta1", Version.fromString("2.0.0-beta1").toString()); + assertEquals("5.0.0-beta1", Version.fromString("5.0.0-beta1").toString()); + assertEquals("5.0.0-alpha1", 
Version.fromString("5.0.0-alpha1").toString()); } public void testIsBeta() { assertTrue(Version.V_2_0_0_beta1.isBeta()); - assertTrue(Version.V_1_4_0_Beta1.isBeta()); - assertFalse(Version.V_1_4_0.isBeta()); + assertTrue(Version.fromString("1.0.0.Beta1").isBeta()); + assertTrue(Version.fromString("0.90.0.Beta1").isBeta()); } + + public void testIsAlpha() { + assertTrue(new Version(5000001, org.apache.lucene.util.Version.LUCENE_6_0_0).isAlpha()); + assertFalse(new Version(4000002, org.apache.lucene.util.Version.LUCENE_6_0_0).isAlpha()); + assertTrue(new Version(4000002, org.apache.lucene.util.Version.LUCENE_6_0_0).isBeta()); + assertTrue(Version.fromString("5.0.0-alpha14").isAlpha()); + assertEquals(5000014, Version.fromString("5.0.0-alpha14").id); + assertTrue(Version.fromId(5000015).isAlpha()); + + for (int i = 0 ; i < 25; i++) { + assertEquals(Version.fromString("5.0.0-alpha" + i).id, Version.fromId(5000000 + i).id); + assertEquals("5.0.0-alpha" + i, Version.fromId(5000000 + i).toString()); + } + + for (int i = 0 ; i < 25; i++) { + assertEquals(Version.fromString("5.0.0-beta" + i).id, Version.fromId(5000000 + i + 25).id); + assertEquals("5.0.0-beta" + i, Version.fromId(5000000 + i + 25).toString()); + } + } + + public void testParseVersion() { final int iters = scaledRandomIntBetween(100, 1000); for (int i = 0; i < iters; i++) { @@ -162,6 +194,17 @@ public class VersionTests extends ESTestCase { Version parsedVersion = Version.fromString(version.toString()); assertEquals(version, parsedVersion); } + + expectThrows(IllegalArgumentException.class, () -> { + Version.fromString("5.0.0-alph2"); + }); + assertSame(Version.CURRENT, Version.fromString(Version.CURRENT.toString())); + + assertSame(Version.fromString("2.0.0-SNAPSHOT"), Version.fromString("2.0.0")); + + expectThrows(IllegalArgumentException.class, () -> { + Version.fromString("5.0.0-SNAPSHOT"); + }); } public void testParseLenient() { @@ -188,7 +231,7 @@ public class VersionTests extends ESTestCase { assertTrue(constantName + " should be final", Modifier.isFinal(versionConstant.getModifiers())); Version v = (Version) versionConstant.get(Version.class); - logger.info("Checking " + v); + logger.info("Checking {}", v); assertEquals("Version id " + field.getName() + " does not point to " + constantName, v, Version.fromId(versionId)); assertEquals("Version " + constantName + " does not have correct id", versionId, v.id); if (v.major >= 2) { @@ -217,4 +260,20 @@ public class VersionTests extends ESTestCase { } } + // this test ensures we never bump the lucene version in a bugfix release + public void testLuceneVersionIsSameOnMinorRelease() { + for (Version version : VersionUtils.allVersions()) { + for (Version other : VersionUtils.allVersions()) { + if (other.onOrAfter(version)) { + assertTrue("lucene versions must be " + other + " >= " + version, + other.luceneVersion.onOrAfter(version.luceneVersion)); + } + if (other.major == version.major && other.minor == version.minor) { + assertEquals(other.luceneVersion.major, version.luceneVersion.major); + assertEquals(other.luceneVersion.minor, version.luceneVersion.minor); + // should we also assert the lucene bugfix version? 
+ } + } + } + } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index 5109ab979cf..379cb5942a2 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -29,8 +29,8 @@ import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -49,6 +49,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -237,8 +238,8 @@ public class CancellableTasksTests extends TaskManagerTestCase { // Cancel main task CancelTasksRequest request = new CancelTasksRequest(); - request.reason("Testing Cancellation"); - request.taskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId())); + request.setReason("Testing Cancellation"); + request.setTaskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId())); // And send the cancellation request to a random node CancelTasksResponse response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request) .get(); @@ -270,7 +271,7 @@ public class CancellableTasksTests extends TaskManagerTestCase { // Make sure that tasks are no longer running ListTasksResponse listTasksResponse = testNodes[randomIntBetween(0, testNodes.length - 1)] - .transportListTasksAction.execute(new ListTasksRequest().taskId( + .transportListTasksAction.execute(new ListTasksRequest().setTaskId( new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId()))).get(); assertEquals(0, listTasksResponse.getTasks().size()); @@ -313,7 +314,7 @@ public class CancellableTasksTests extends TaskManagerTestCase { // Make sure that tasks are running ListTasksResponse listTasksResponse = testNodes[randomIntBetween(0, testNodes.length - 1)] - .transportListTasksAction.execute(new ListTasksRequest().parentTaskId(new TaskId(mainNode, mainTask.getId()))).get(); + .transportListTasksAction.execute(new ListTasksRequest().setParentTaskId(new TaskId(mainNode, mainTask.getId()))).get(); assertThat(listTasksResponse.getTasks().size(), greaterThanOrEqualTo(blockOnNodes.size())); // Simulate the coordinating node leaving the cluster @@ -324,15 +325,15 @@ public class CancellableTasksTests extends TaskManagerTestCase { DiscoveryNode master = discoveryNodes[0]; for (int i = 1; i < testNodes.length; i++) { // Notify only nodes that should remain in the cluster - testNodes[i].clusterService.setState(ClusterStateCreationUtils.state(testNodes[i].discoveryNode, master, discoveryNodes)); + setState(testNodes[i].clusterService, 
ClusterStateCreationUtils.state(testNodes[i].discoveryNode, master, discoveryNodes)); } if (simulateBanBeforeLeaving) { logger.info("--> Simulate issuing cancel request on the node that is about to leave the cluster"); // Simulate issuing cancel request on the node that is about to leave the cluster CancelTasksRequest request = new CancelTasksRequest(); - request.reason("Testing Cancellation"); - request.taskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId())); + request.setReason("Testing Cancellation"); + request.setTaskId(new TaskId(testNodes[0].discoveryNode.getId(), mainTask.getId())); // And send the cancellation request to a random node CancelTasksResponse response = testNodes[0].transportCancelTasksAction.execute(request).get(); logger.info("--> Done simulating issuing cancel request on the node that is about to leave the cluster"); @@ -356,7 +357,7 @@ public class CancellableTasksTests extends TaskManagerTestCase { // Make sure that tasks are no longer running try { ListTasksResponse listTasksResponse1 = testNodes[randomIntBetween(1, testNodes.length - 1)] - .transportListTasksAction.execute(new ListTasksRequest().taskId(new TaskId(mainNode, mainTask.getId()))).get(); + .transportListTasksAction.execute(new ListTasksRequest().setTaskId(new TaskId(mainNode, mainTask.getId()))).get(); assertEquals(0, listTasksResponse1.getTasks().size()); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 4dcf54b5d0b..48d9f8fed40 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -30,9 +30,9 @@ import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -40,7 +40,6 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -58,6 +57,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.Supplier; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; + /** * The test case for unit testing task manager and related transport actions */ @@ -137,14 +139,14 @@ public abstract class TaskManagerTestCase extends ESTestCase { * Simulates node-based task that can be used to block node tasks 
so they are guaranteed to be registered by task manager */ abstract class AbstractTestNodesAction<NodesRequest extends BaseNodesRequest<NodesRequest>, NodeRequest extends BaseNodeRequest> - extends TransportNodesAction<NodesRequest, NodesResponse, NodeRequest, NodeResponse> { + extends TransportNodesAction<NodesRequest, NodesResponse, NodeRequest, NodeResponse> { AbstractTestNodesAction(Settings settings, String actionName, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, Supplier<NodesRequest> request, Supplier<NodeRequest> nodeRequest) { super(settings, actionName, clusterName, threadPool, clusterService, transportService, - new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), - request, nodeRequest, ThreadPool.Names.GENERIC); + new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), + request, nodeRequest, ThreadPool.Names.GENERIC); } @Override @@ -182,8 +184,8 @@ public abstract class TaskManagerTestCase extends ESTestCase { public static class TestNode implements Releasable { public TestNode(String name, ThreadPool threadPool, Settings settings) { transportService = new TransportService(settings, - new LocalTransport(settings, threadPool, Version.CURRENT, new NamedWriteableRegistry()), - threadPool, new NamedWriteableRegistry()) { + new LocalTransport(settings, threadPool, Version.CURRENT, new NamedWriteableRegistry()), + threadPool) { @Override protected TaskManager createTaskManager() { if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) { @@ -194,19 +196,19 @@ public abstract class TaskManagerTestCase extends ESTestCase { } }; transportService.start(); - clusterService = new TestClusterService(threadPool, transportService); + clusterService = createClusterService(threadPool); clusterService.add(transportService.getTaskManager()); discoveryNode = new DiscoveryNode(name, transportService.boundAddress().publishAddress(), Version.CURRENT); IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings); ActionFilters actionFilters = new ActionFilters(Collections.emptySet()); transportListTasksAction = new TransportListTasksAction(settings, clusterName, threadPool, clusterService, transportService, - actionFilters, indexNameExpressionResolver); + actionFilters, indexNameExpressionResolver); transportCancelTasksAction = new TransportCancelTasksAction(settings, clusterName, threadPool, clusterService, transportService, - actionFilters, indexNameExpressionResolver); + actionFilters, indexNameExpressionResolver); transportService.acceptIncomingRequests(); } - public final TestClusterService clusterService; + public final ClusterService clusterService; public final TransportService transportService; public final DiscoveryNode discoveryNode; public final TransportListTasksAction transportListTasksAction; @@ -214,6 +216,7 @@ public abstract class TaskManagerTestCase extends ESTestCase { @Override public void close() { + clusterService.close(); transportService.close(); } } @@ -225,7 +228,7 @@ } DiscoveryNode master = discoveryNodes[0]; for (TestNode node : nodes) { - node.clusterService.setState(ClusterStateCreationUtils.state(node.discoveryNode, master, discoveryNodes)); + setState(node.clusterService, ClusterStateCreationUtils.state(node.discoveryNode, master, discoveryNodes)); } for (TestNode nodeA : nodes) { for (TestNode nodeB : nodes) { @@ -238,7 +241,7 @@ RecordingTaskManagerListener[] listeners = new RecordingTaskManagerListener[nodes.length]; for (int i = 0; i < nodes.length;
i++) { listeners[i] = new RecordingTaskManagerListener(nodes[i].discoveryNode, actionMasks); - ((MockTaskManager) (nodes[i].clusterService.getTaskManager())).addListener(listeners[i]); + ((MockTaskManager) (nodes[i].transportService.getTaskManager())).addListener(listeners[i]); } return listeners; } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index eaa3caf9084..b22d93ef6b2 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.action.admin.cluster.node.tasks; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; @@ -27,10 +29,10 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; +import org.elasticsearch.action.fieldstats.FieldStatsAction; import org.elasticsearch.action.index.IndexAction; -import org.elasticsearch.action.percolate.PercolateAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; @@ -40,6 +42,8 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.test.tasks.MockTaskManagerListener; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.ReceiveTimeoutTransportException; +import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; @@ -54,8 +58,11 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantLock; import java.util.function.Function; +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.emptyCollectionOf; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; @@ -118,20 +125,20 @@ public class TasksIT extends ESIntegTestCase { } public void testTransportReplicationAllShardsTasks() { - registerTaskManageListeners(PercolateAction.NAME); // main task - registerTaskManageListeners(PercolateAction.NAME + "[s]"); // shard level tasks + registerTaskManageListeners(FieldStatsAction.NAME); // main task + registerTaskManageListeners(FieldStatsAction.NAME + "[s]"); // shard level tasks createIndex("test"); ensureGreen("test"); // Make sure all shards are allocated - client().preparePercolate().setIndices("test").setDocumentType("foo").setSource("{}").get(); + client().prepareFieldStats().setFields("field").get(); // the field stats operation should produce one main task NumShards
numberOfShards = getNumShards("test"); - assertEquals(1, numberOfEvents(PercolateAction.NAME, Tuple::v1)); + assertEquals(1, numberOfEvents(FieldStatsAction.NAME, Tuple::v1)); // and then one operation per shard - assertEquals(numberOfShards.totalNumShards, numberOfEvents(PercolateAction.NAME + "[s]", Tuple::v1)); + assertEquals(numberOfShards.totalNumShards, numberOfEvents(FieldStatsAction.NAME + "[s]", Tuple::v1)); // the shard level tasks should have the main task as a parent - assertParentTask(findEvents(PercolateAction.NAME + "[s]", Tuple::v1), findEvents(PercolateAction.NAME, Tuple::v1).get(0)); + assertParentTask(findEvents(FieldStatsAction.NAME + "[s]", Tuple::v1), findEvents(FieldStatsAction.NAME, Tuple::v1).get(0)); } public void testTransportBroadcastByNodeTasks() { @@ -257,8 +264,8 @@ public class TasksIT extends ESIntegTestCase { ReentrantLock taskFinishLock = new ReentrantLock(); taskFinishLock.lock(); CountDownLatch taskRegistered = new CountDownLatch(1); - for (ClusterService clusterService : internalCluster().getInstances(ClusterService.class)) { - ((MockTaskManager)clusterService.getTaskManager()).addListener(new MockTaskManagerListener() { + for (TransportService transportService : internalCluster().getInstances(TransportService.class)) { + ((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() { @Override public void onTaskRegistered(Task task) { if (task.getAction().startsWith(IndexAction.NAME)) { @@ -327,10 +334,82 @@ public class TasksIT extends ESIntegTestCase { assertEquals(0, client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size()); } + public void testTasksListWaitForCompletion() throws Exception { + // Start blocking test task + ListenableActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client()) + .execute(); + + ListenableActionFuture<ListTasksResponse> waitResponseFuture; + try { + // Wait for the task to start on all nodes + assertBusy(() -> assertEquals(internalCluster().numDataAndMasterNodes(), + client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size())); + + // Spin up a request to wait for that task to finish + waitResponseFuture = client().admin().cluster().prepareListTasks() + .setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").setWaitForCompletion(true).execute(); + } finally { + // Unblock the request so the wait for completion request can finish + TestTaskPlugin.UnblockTestTasksAction.INSTANCE.newRequestBuilder(client()).get(); + } + + // Now that the task is unblocked the list response will come back + ListTasksResponse waitResponse = waitResponseFuture.get(); + // If any tasks come back then they are the tasks we asked for - it'd be super weird if this wasn't true + for (TaskInfo task: waitResponse.getTasks()) { + assertEquals(task.getAction(), TestTaskPlugin.TestTaskAction.NAME + "[n]"); + } + // See the next test to cover the timeout case + + future.get(); + } + + public void testTasksListWaitForTimeout() throws Exception { + // Start blocking test task + ListenableActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client()) + .execute(); + try { + // Wait for the task to start on all nodes + assertBusy(() -> assertEquals(internalCluster().numDataAndMasterNodes(), + client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size())); + + // Spin up a request that should
wait for those tasks to finish + // It will timeout because we haven't unblocked the tasks + ListTasksResponse waitResponse = client().admin().cluster().prepareListTasks() + .setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").setWaitForCompletion(true).setTimeout(timeValueMillis(100)) + .get(); + + assertFalse(waitResponse.getNodeFailures().isEmpty()); + for (FailedNodeException failure : waitResponse.getNodeFailures()) { + Throwable timeoutException = failure.getCause(); + // The exception sometimes comes back wrapped depending on the client + if (timeoutException.getCause() != null) { + timeoutException = timeoutException.getCause(); + } + assertThat(timeoutException, + either(instanceOf(ElasticsearchTimeoutException.class)).or(instanceOf(ReceiveTimeoutTransportException.class))); + } } finally { + // Now we can unblock those requests + TestTaskPlugin.UnblockTestTasksAction.INSTANCE.newRequestBuilder(client()).get(); + } + future.get(); + } + + public void testTasksListWaitForNoTask() throws Exception { + // Spin up a request to wait for no matching tasks + ListenableActionFuture<ListTasksResponse> waitResponseFuture = client().admin().cluster().prepareListTasks() + .setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").setWaitForCompletion(true).setTimeout(timeValueMillis(10)) + .execute(); + + // It should finish quickly and without complaint + assertThat(waitResponseFuture.get().getTasks(), emptyCollectionOf(TaskInfo.class)); + } + @Override public void tearDown() throws Exception { for (Map.Entry<Tuple<String, String>, RecordingTaskManagerListener> entry : listeners.entrySet()) { - ((MockTaskManager)internalCluster().getInstance(ClusterService.class, entry.getKey().v1()).getTaskManager()).removeListener(entry.getValue()); + ((MockTaskManager) internalCluster().getInstance(TransportService.class, entry.getKey().v1()).getTaskManager()).removeListener(entry.getValue()); } listeners.clear(); super.tearDown(); @@ -340,10 +419,10 @@ * Registers recording task event listeners with the given action mask on all nodes */ private void registerTaskManageListeners(String actionMasks) { - for (ClusterService clusterService : internalCluster().getInstances(ClusterService.class)) { - DiscoveryNode node = clusterService.localNode(); + for (String nodeName : internalCluster().getNodeNames()) { + DiscoveryNode node = internalCluster().getInstance(ClusterService.class, nodeName).localNode(); RecordingTaskManagerListener listener = new RecordingTaskManagerListener(node, Strings.splitStringToArray(actionMasks, ',')); - ((MockTaskManager)clusterService.getTaskManager()).addListener(listener); + ((MockTaskManager) internalCluster().getInstance(TransportService.class, nodeName).getTaskManager()).addListener(listener); RecordingTaskManagerListener oldListener = listeners.put(new Tuple<>(node.name(), actionMasks), listener); assertNull(oldListener); } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 0d4372a51eb..72ea730f881 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -35,10 +35,10 @@ import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.client.ElasticsearchClient; import
org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -345,7 +345,10 @@ public class TestTaskPlugin extends Plugin { public static class UnblockTestTasksRequest extends BaseTasksRequest { - + @Override + public boolean match(Task task) { + return task instanceof TestTask && super.match(task); + } } public static class UnblockTestTasksResponse extends BaseTasksResponse { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index e1501f9b14c..64d69a4864f 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -35,9 +35,9 @@ import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -48,7 +48,6 @@ import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -294,14 +293,14 @@ public class TransportTasksActionTests extends TaskManagerTestCase { actions[i] = new TestNodesAction(Settings.EMPTY, "testAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) { @Override protected NodeResponse nodeOperation(NodeRequest request) { - logger.info("Action on node " + node); + logger.info("Action on node {}", node); actionLatch.countDown(); try { checkLatch.await(); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } - logger.info("Action on node " + node + " finished"); + logger.info("Action on node {} finished", node); return new NodeResponse(testNodes[node].discoveryNode); } }; @@ -355,7 +354,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { int testNodeNum = randomIntBetween(0, testNodes.length - 1); TestNode testNode = testNodes[testNodeNum]; ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.actions("testAction*"); // pick all test actions + listTasksRequest.setActions("testAction*"); // pick all test actions logger.info("Listing currently running tasks using node [{}]", testNodeNum); ListTasksResponse response = testNode.transportListTasksAction.execute(listTasksRequest).get(); logger.info("Checking currently running tasks"); @@ -371,7 +370,7 @@ public class TransportTasksActionTests extends 
TaskManagerTestCase { // Check task counts using transport with filtering testNode = testNodes[randomIntBetween(0, testNodes.length - 1)]; listTasksRequest = new ListTasksRequest(); - listTasksRequest.actions("testAction[n]"); // only pick node actions + listTasksRequest.setActions("testAction[n]"); // only pick node actions response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(testNodes.length, response.getPerNodeTasks().size()); for (Map.Entry> entry : response.getPerNodeTasks().entrySet()) { @@ -380,7 +379,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { } // Check task counts using transport with detailed description - listTasksRequest.detailed(true); // same request only with detailed description + listTasksRequest.setDetailed(true); // same request only with detailed description response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(testNodes.length, response.getPerNodeTasks().size()); for (Map.Entry> entry : response.getPerNodeTasks().entrySet()) { @@ -389,7 +388,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { } // Make sure that the main task on coordinating node is the task that was returned to us by execute() - listTasksRequest.actions("testAction"); // only pick the main task + listTasksRequest.setActions("testAction"); // only pick the main task response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(1, response.getTasks().size()); assertEquals(mainTask.getId(), response.getTasks().get(0).getId()); @@ -417,7 +416,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Get the parent task ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.actions("testAction"); + listTasksRequest.setActions("testAction"); ListTasksResponse response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(1, response.getTasks().size()); String parentNode = response.getTasks().get(0).getNode().getId(); @@ -425,7 +424,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Find tasks with common parent listTasksRequest = new ListTasksRequest(); - listTasksRequest.parentTaskId(new TaskId(parentNode, parentTaskId)); + listTasksRequest.setParentTaskId(new TaskId(parentNode, parentTaskId)); response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(testNodes.length, response.getTasks().size()); for (TaskInfo task : response.getTasks()) { @@ -451,7 +450,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Get the parent task ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.actions("testAction*"); + listTasksRequest.setActions("testAction*"); ListTasksResponse response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(0, response.getTasks().size()); @@ -472,7 +471,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Check task counts using transport with filtering TestNode testNode = testNodes[randomIntBetween(0, testNodes.length - 1)]; ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.actions("testAction[n]"); // only pick node actions + listTasksRequest.setActions("testAction[n]"); // only pick node actions ListTasksResponse response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(testNodes.length, response.getPerNodeTasks().size()); for (Map.Entry> 
entry : response.getPerNodeTasks().entrySet()) { @@ -482,7 +481,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Check task counts using transport with detailed description long minimalDurationNanos = System.nanoTime() - maximumStartTimeNanos; - listTasksRequest.detailed(true); // same request only with detailed description + listTasksRequest.setDetailed(true); // same request only with detailed description response = testNode.transportListTasksAction.execute(listTasksRequest).get(); assertEquals(testNodes.length, response.getPerNodeTasks().size()); for (Map.Entry> entry : response.getPerNodeTasks().entrySet()) { @@ -518,9 +517,9 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Try to cancel main task using action name CancelTasksRequest request = new CancelTasksRequest(); - request.nodesIds(testNodes[0].discoveryNode.getId()); - request.reason("Testing Cancellation"); - request.actions(actionName); + request.setNodesIds(testNodes[0].discoveryNode.getId()); + request.setReason("Testing Cancellation"); + request.setActions(actionName); CancelTasksResponse response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request) .get(); @@ -532,8 +531,8 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Try to cancel main task using id request = new CancelTasksRequest(); - request.reason("Testing Cancellation"); - request.taskId(new TaskId(testNodes[0].discoveryNode.getId(), task.getId())); + request.setReason("Testing Cancellation"); + request.setTaskId(new TaskId(testNodes[0].discoveryNode.getId(), task.getId())); response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request).get(); // Shouldn't match any tasks since testAction doesn't support cancellation @@ -544,7 +543,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Make sure that task is still running ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.actions(actionName); + listTasksRequest.setActions(actionName); ListTasksResponse listResponse = testNodes[randomIntBetween(0, testNodes.length - 1)].transportListTasksAction.execute (listTasksRequest).get(); assertEquals(1, listResponse.getPerNodeTasks().size()); @@ -565,7 +564,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { actions[i] = new TestNodesAction(Settings.EMPTY, "testAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) { @Override protected NodeResponse nodeOperation(NodeRequest request) { - logger.info("Action on node " + node); + logger.info("Action on node {}", node); throw new RuntimeException("Test exception"); } }; @@ -604,9 +603,9 @@ public class TransportTasksActionTests extends TaskManagerTestCase { tasksActions[i] = new TestTasksAction(Settings.EMPTY, "testTasksAction", clusterName, threadPool, testNodes[i].clusterService, testNodes[i].transportService) { @Override protected TestTaskResponse taskOperation(TestTasksRequest request, Task task) { - logger.info("Task action on node " + node); + logger.info("Task action on node {}", node); if (failTaskOnNode == node && task.getParentTaskId().isSet() == false) { - logger.info("Failing on node " + node); + logger.info("Failing on node {}", node); throw new RuntimeException("Task level failure"); } return new TestTaskResponse("Success on node " + node); @@ -617,7 +616,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // Run 
task action on node tasks that are currently running // should be successful on all nodes except one TestTasksRequest testTasksRequest = new TestTasksRequest(); - testTasksRequest.actions("testAction[n]"); // pick all test actions + testTasksRequest.setActions("testAction[n]"); // pick all test actions TestTasksResponse response = tasksActions[0].execute(testTasksRequest).get(); // Get successful responses from all nodes except one assertEquals(testNodes.length - 1, response.tasks.size()); diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index 9c554da781a..503db65e810 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -34,7 +34,7 @@ import static org.hamcrest.Matchers.hasSize; /** * This class tests that repository operations (Put, Delete, Verify) are blocked when the cluster is read-only. * - * The @ClusterScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". + * The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". */ @ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class RepositoryBlocksIT extends ESIntegTestCase { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index f3a23be919d..82a2637d76b 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -41,7 +41,7 @@ import static org.hamcrest.Matchers.hasSize; /** * This class tests that snapshot operations (Create, Delete, Restore) are blocked when the cluster is read-only. * - * The @ClusterScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". + * The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". 
*/ @ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class SnapshotBlocksIT extends ESIntegTestCase { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java index a2d838bc3fd..fc04de81254 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java @@ -55,13 +55,7 @@ public class ClusterStateRequestTests extends ESTestCase { assertThat(deserializedCSRequest.nodes(), equalTo(clusterStateRequest.nodes())); assertThat(deserializedCSRequest.blocks(), equalTo(clusterStateRequest.blocks())); assertThat(deserializedCSRequest.indices(), equalTo(clusterStateRequest.indices())); - - if (testVersion.onOrAfter(Version.V_1_5_0)) { - assertOptionsMatch(deserializedCSRequest.indicesOptions(), clusterStateRequest.indicesOptions()); - } else { - // versions before V_1_5_0 use IndicesOptions.lenientExpandOpen() - assertOptionsMatch(deserializedCSRequest.indicesOptions(), IndicesOptions.lenientExpandOpen()); - } + assertOptionsMatch(deserializedCSRequest.indicesOptions(), clusterStateRequest.indicesOptions()); } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 1eeba76dbca..3b1040446e0 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.create; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.search.SearchResponse; @@ -29,6 +30,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.query.RangeQueryBuilder; @@ -179,7 +181,6 @@ public class CreateIndexIT extends ESIntegTestCase { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/14932,https://github.com/elastic/elasticsearch/pull/15853" ) public void testCreateAndDeleteIndexConcurrently() throws InterruptedException { createIndex("test"); final AtomicInteger indexVersion = new AtomicInteger(0); @@ -224,10 +225,14 @@ public class CreateIndexIT extends ESIntegTestCase { for (int i = 0; i < numDocs; i++) { try { synchronized (indexVersionLock) { - client().prepareIndex("test", "test").setSource("index_version", indexVersion.get()).get(); + client().prepareIndex("test", "test").setSource("index_version", indexVersion.get()) + .setTimeout(TimeValue.timeValueSeconds(10)).get(); } } catch (IndexNotFoundException inf) { // fine + } catch (UnavailableShardsException ex) { + assertEquals(ex.getCause().getClass(), IndexNotFoundException.class); + // fine we run into a delete index while retrying } } latch.await(); diff 
--git a/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java index 967a9026469..04892b82339 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.mapping.put; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; public class PutMappingRequestTests extends ESTestCase { @@ -48,5 +49,12 @@ public class PutMappingRequestTests extends ESTestCase { r.source("somevalidmapping"); ex = r.validate(); assertNull("validation should succeed", ex); + + r.setConcreteIndex(new Index("foo", "bar")); + ex = r.validate(); + assertNotNull("source validation should fail", ex); + assertEquals(ex.getMessage(), + "Validation Failed: 1: either concrete index or unresolved indices can be set," + + " concrete index: [[foo/bar]] and indices: [myindex];"); } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java index 2e39c39cfd2..c31993ebb81 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; @@ -157,6 +158,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "5") .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) )); + indexRandomData(index); ensureGreen(index); @@ -165,9 +167,10 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase { logger.info("--> corrupt random shard copies"); Map> corruptedShardIDMap = new HashMap<>(); + Index idx = resolveIndex(index); for (String node : internalCluster().nodesInclude(index)) { IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); - IndexService indexShards = indexServices.indexServiceSafe(index); + IndexService indexShards = indexServices.indexServiceSafe(idx); for (Integer shardId : indexShards.shardIds()) { IndexShard shard = indexShards.getShard(shardId); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java index 13a2bb29981..77bd7c89927 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java @@ -99,7 +99,6 @@ public class IndicesStatsTests extends ESSingleNodeTestCase { assertThat(commitStats.getId(), notNullValue()); assertThat(commitStats.getUserData(), 
hasKey(Translog.TRANSLOG_GENERATION_KEY)); assertThat(commitStats.getUserData(), hasKey(Translog.TRANSLOG_UUID_KEY)); - } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java index 9d8002210e7..620cef31f9a 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java @@ -98,13 +98,7 @@ public class UpgradeIT extends ESBackcompatTestCase { } indexRandom(true, docs); ensureGreen(indexName); - if (globalCompatibilityVersion().before(Version.V_1_4_0_Beta1)) { - // before 1.4 and the wait_if_ongoing flag, flushes could fail randomly, so we - // need to continue to try flushing until all shards succeed - assertTrue(awaitBusy(() -> flush(indexName).getFailedShards() == 0)); - } else { - assertEquals(0, flush(indexName).getFailedShards()); - } + assertEquals(0, flush(indexName).getFailedShards()); // index more docs that won't be flushed numDocs = scaledRandomIntBetween(100, 1000); @@ -140,7 +134,7 @@ public class UpgradeIT extends ESBackcompatTestCase { // means we can never generate ancient segments in this test (unless Lucene major version bumps but ES major version does not): assertFalse(hasAncientSegments(client(), indexToUpgrade)); - logger.info("--> Running upgrade on index " + indexToUpgrade); + logger.info("--> Running upgrade on index {}", indexToUpgrade); assertNoFailures(client().admin().indices().prepareUpgrade(indexToUpgrade).get()); awaitBusy(() -> { try { @@ -234,7 +228,7 @@ public class UpgradeIT extends ESBackcompatTestCase { ESLogger logger = Loggers.getLogger(UpgradeIT.class); int toUpgrade = 0; for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { - logger.info("Index: " + status.getIndex() + ", total: " + status.getTotalBytes() + ", toUpgrade: " + status.getToUpgradeBytes()); + logger.info("Index: {}, total: {}, toUpgrade: {}", status.getIndex(), status.getTotalBytes(), status.getToUpgradeBytes()); toUpgrade += status.getToUpgradeBytes(); } return toUpgrade == 0; diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java index 6cd877315cd..e36b4e4f028 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java @@ -28,19 +28,19 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.TransportService; +import org.junit.After; import org.junit.Before; import java.nio.charset.StandardCharsets; @@ -49,6 +49,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.function.LongSupplier; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -57,16 +58,23 @@ import static org.mockito.Mockito.mock; public class TransportBulkActionTookTests extends ESTestCase { private ThreadPool threadPool; + private ClusterService clusterService; @Before public void setUp() throws Exception { super.setUp(); threadPool = mock(ThreadPool.class); + clusterService = createClusterService(threadPool); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); } private TransportBulkAction createAction(boolean controlled, AtomicLong expected) { CapturingTransport capturingTransport = new CapturingTransport(); - ClusterService clusterService = new TestClusterService(threadPool); TransportService transportService = new TransportService(capturingTransport, threadPool); transportService.start(); transportService.acceptIncomingRequests(); @@ -191,7 +199,7 @@ public class TransportBulkActionTookTests extends ESTestCase { } @Override - public String[] concreteIndices(ClusterState state, IndicesRequest request) { + public String[] concreteIndexNames(ClusterState state, IndicesRequest request) { return request.indices(); } } diff --git a/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java b/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java index fa9728c4cd1..c027e4ecd18 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java @@ -29,10 +29,10 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActionFilterChain; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; diff --git a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index ed829d8fa8d..595b93773e3 100644 --- a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -45,7 +45,7 @@ public class MultiSearchRequestTests extends ESTestCase { IndicesQueriesRegistry registry = new IndicesQueriesRegistry(Settings.EMPTY, singletonMap("match_all", new MatchAllQueryParser())); byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch1.json"); MultiSearchRequest request = RestMultiSearchAction.parseRequest(new MultiSearchRequest(), new BytesArray(data), false, null, null, - 
null, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true, registry, ParseFieldMatcher.EMPTY, null); + null, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true, registry, ParseFieldMatcher.EMPTY, null, null); assertThat(request.requests().size(), equalTo(8)); assertThat(request.requests().get(0).indices()[0], equalTo("test")); assertThat(request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); @@ -72,7 +72,7 @@ public class MultiSearchRequestTests extends ESTestCase { IndicesQueriesRegistry registry = new IndicesQueriesRegistry(Settings.EMPTY, singletonMap("match_all", new MatchAllQueryParser())); byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch2.json"); MultiSearchRequest request = RestMultiSearchAction.parseRequest(new MultiSearchRequest(), new BytesArray(data), false, null, null, - null, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true, registry, ParseFieldMatcher.EMPTY, null); + null, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true, registry, ParseFieldMatcher.EMPTY, null, null); assertThat(request.requests().size(), equalTo(5)); assertThat(request.requests().get(0).indices()[0], equalTo("test")); assertThat(request.requests().get(0).types().length, equalTo(0)); @@ -91,7 +91,7 @@ public class MultiSearchRequestTests extends ESTestCase { IndicesQueriesRegistry registry = new IndicesQueriesRegistry(Settings.EMPTY, singletonMap("match_all", new MatchAllQueryParser())); byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch3.json"); MultiSearchRequest request = RestMultiSearchAction.parseRequest(new MultiSearchRequest(), new BytesArray(data), false, null, null, - null, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true, registry, ParseFieldMatcher.EMPTY, null); + null, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true, registry, ParseFieldMatcher.EMPTY, null, null); assertThat(request.requests().size(), equalTo(4)); assertThat(request.requests().get(0).indices()[0], equalTo("test0")); assertThat(request.requests().get(0).indices()[1], equalTo("test1")); @@ -111,7 +111,7 @@ public class MultiSearchRequestTests extends ESTestCase { IndicesQueriesRegistry registry = new IndicesQueriesRegistry(Settings.EMPTY, singletonMap("match_all", new MatchAllQueryParser())); byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch4.json"); MultiSearchRequest request = RestMultiSearchAction.parseRequest(new MultiSearchRequest(), new BytesArray(data), false, null, null, - null, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true, registry, ParseFieldMatcher.EMPTY, null); + null, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true, registry, ParseFieldMatcher.EMPTY, null, null); assertThat(request.requests().size(), equalTo(3)); assertThat(request.requests().get(0).indices()[0], equalTo("test0")); assertThat(request.requests().get(0).indices()[1], equalTo("test1")); @@ -133,7 +133,7 @@ public class MultiSearchRequestTests extends ESTestCase { IndicesQueriesRegistry registry = new IndicesQueriesRegistry(Settings.EMPTY, singletonMap("match_all", new MatchAllQueryParser())); byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch5.json"); MultiSearchRequest request = RestMultiSearchAction.parseRequest(new MultiSearchRequest(), new BytesArray(data), 
true, null, null, - null, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true, registry, ParseFieldMatcher.EMPTY, null); + null, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true, registry, ParseFieldMatcher.EMPTY, null, null); assertThat(request.requests().size(), equalTo(3)); assertThat(request.requests().get(0).indices()[0], equalTo("test0")); assertThat(request.requests().get(0).indices()[1], equalTo("test1")); diff --git a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 2615e5a0b22..b166f5f45c3 100644 --- a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; @@ -51,13 +52,13 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseOptions; import org.elasticsearch.transport.TransportService; +import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -73,6 +74,8 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.object.HasToString.hasToString; @@ -83,7 +86,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { private static final String TEST_CLUSTER = "test-cluster"; private static ThreadPool THREAD_POOL; - private TestClusterService clusterService; + private ClusterService clusterService; private CapturingTransport transport; private TestTransportBroadcastByNodeAction action; @@ -168,7 +171,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { } @Override - public String[] concreteIndices(ClusterState state, IndicesRequest request) { + public String[] concreteIndexNames(ClusterState state, IndicesRequest request) { return request.indices(); } } @@ -182,7 +185,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); transport = new CapturingTransport(); - clusterService = new TestClusterService(THREAD_POOL); + clusterService = createClusterService(THREAD_POOL); final TransportService 
transportService = new TransportService(transport, THREAD_POOL); transportService.start(); transportService.acceptIncomingRequests(); @@ -197,10 +200,16 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { ); } - void setClusterState(TestClusterService clusterService, String index) { + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + } + + void setClusterState(ClusterService clusterService, String index) { int numberOfNodes = randomIntBetween(3, 5); DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(new Index(index,"_na_")); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(new Index(index, "_na_")); int shardIndex = -1; for (int i = 0; i < numberOfNodes; i++) { @@ -221,7 +230,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { stateBuilder.nodes(discoBuilder); stateBuilder.routingTable(RoutingTable.builder().add(indexRoutingTable.build()).build()); ClusterState clusterState = stateBuilder.build(); - clusterService.setState(clusterState); + setState(clusterService, clusterState); } static DiscoveryNode newNode(int nodeId) { @@ -241,7 +250,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { ClusterBlocks.Builder block = ClusterBlocks.builder() .addGlobalBlock(new ClusterBlock(1, "test-block", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); - clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); try { action.new AsyncAction(null, request, listener).start(); fail("expected ClusterBlockException"); @@ -256,7 +265,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { ClusterBlocks.Builder block = ClusterBlocks.builder() .addIndexBlock(TEST_INDEX, new ClusterBlock(1, "test-block", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); - clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); try { action.new AsyncAction(null, request, listener).start(); fail("expected ClusterBlockException"); @@ -301,7 +310,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes()); builder.remove(masterNode.id()); - clusterService.setState(ClusterState.builder(clusterService.state()).nodes(builder)); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder)); action.new AsyncAction(null, request, listener).start(); @@ -348,7 +357,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { TransportResponse response = channel.getCapturedResponse(); assertTrue(response instanceof TransportBroadcastByNodeAction.NodeResponse); - TransportBroadcastByNodeAction.NodeResponse nodeResponse = (TransportBroadcastByNodeAction.NodeResponse)response; + TransportBroadcastByNodeAction.NodeResponse nodeResponse = (TransportBroadcastByNodeAction.NodeResponse) response; // check the operation was executed on the correct node assertEquals("node id", nodeId, nodeResponse.getNodeId()); @@ -387,7 +396,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { builder.remove(failedMasterNode.id()); builder.masterNodeId(null); - 
clusterService.setState(ClusterState.builder(clusterService.state()).nodes(builder)); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder)); } action.new AsyncAction(null, request, listener).start(); diff --git a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index 860f95ace55..226099e32f7 100644 --- a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.block.ClusterBlock; @@ -36,6 +35,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -44,11 +44,11 @@ import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportService; +import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -58,13 +58,15 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; public class TransportMasterNodeActionTests extends ESTestCase { private static ThreadPool threadPool; - private TestClusterService clusterService; + private ClusterService clusterService; private TransportService transportService; private CapturingTransport transport; private DiscoveryNode localNode; @@ -81,13 +83,20 @@ public class TransportMasterNodeActionTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); transport = new CapturingTransport(); - clusterService = new TestClusterService(threadPool); + clusterService = createClusterService(threadPool); transportService = new TransportService(transport, threadPool); transportService.start(); transportService.acceptIncomingRequests(); localNode = new DiscoveryNode("local_node", DummyTransportAddress.INSTANCE, Version.CURRENT); remoteNode = new DiscoveryNode("remote_node", DummyTransportAddress.INSTANCE, Version.CURRENT); - allNodes = new 
DiscoveryNode[] { localNode, remoteNode }; + allNodes = new DiscoveryNode[]{localNode, remoteNode}; + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + transportService.close(); } @AfterClass @@ -157,7 +166,7 @@ public class TransportMasterNodeActionTests extends ESTestCase { final Throwable exception = new Throwable(); final Response response = new Response(); - clusterService.setState(ClusterStateCreationUtils.state(localNode, localNode, allNodes)); + setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { @Override @@ -194,7 +203,7 @@ public class TransportMasterNodeActionTests extends ESTestCase { randomFrom(RestStatus.values()), ClusterBlockLevel.ALL); ClusterState stateWithBlock = ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes)) .blocks(ClusterBlocks.builder().addGlobalBlock(block)).build(); - clusterService.setState(stateWithBlock); + setState(clusterService, stateWithBlock); new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { @Override @@ -206,7 +215,7 @@ public class TransportMasterNodeActionTests extends ESTestCase { if (retryableBlock && unblockBeforeTimeout) { assertFalse(listener.isDone()); - clusterService.setState(ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes)) + setState(clusterService, ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes)) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build()); assertTrue(listener.isDone()); listener.get(); @@ -231,7 +240,7 @@ public class TransportMasterNodeActionTests extends ESTestCase { Request request = new Request(); PlainActionFuture listener = new PlainActionFuture<>(); - clusterService.setState(ClusterStateCreationUtils.state(localNode, randomFrom(null, localNode, remoteNode), allNodes)); + setState(clusterService, ClusterStateCreationUtils.state(localNode, randomFrom(null, localNode, remoteNode), allNodes)); new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { @Override @@ -246,7 +255,7 @@ public class TransportMasterNodeActionTests extends ESTestCase { public void testMasterNotAvailable() throws ExecutionException, InterruptedException { Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(0)); - clusterService.setState(ClusterStateCreationUtils.state(localNode, null, allNodes)); + setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener); assertTrue(listener.isDone()); @@ -255,18 +264,18 @@ public class TransportMasterNodeActionTests extends ESTestCase { public void testMasterBecomesAvailable() throws ExecutionException, InterruptedException { Request request = new Request(); - clusterService.setState(ClusterStateCreationUtils.state(localNode, null, allNodes)); + setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener); assertFalse(listener.isDone()); - clusterService.setState(ClusterStateCreationUtils.state(localNode, localNode, allNodes)); + 
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); assertTrue(listener.isDone()); listener.get(); } public void testDelegateToMaster() throws ExecutionException, InterruptedException { Request request = new Request(); - clusterService.setState(ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); + setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener); @@ -286,7 +295,7 @@ public class TransportMasterNodeActionTests extends ESTestCase { public void testDelegateToFailingMaster() throws ExecutionException, InterruptedException { boolean failsWithConnectTransportException = randomBoolean(); Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(failsWithConnectTransportException ? 60 : 0)); - clusterService.setState(ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); + setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener); @@ -300,7 +309,7 @@ public class TransportMasterNodeActionTests extends ESTestCase { if (failsWithConnectTransportException) { transport.handleRemoteError(capturedRequest.requestId, new ConnectTransportException(remoteNode, "Fake error")); assertFalse(listener.isDone()); - clusterService.setState(ClusterStateCreationUtils.state(localNode, localNode, allNodes)); + setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); assertTrue(listener.isDone()); listener.get(); } else { @@ -322,13 +331,13 @@ public class TransportMasterNodeActionTests extends ESTestCase { final Response response = new Response(); - clusterService.setState(ClusterStateCreationUtils.state(localNode, localNode, allNodes)); + setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { @Override protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { // The other node has become master, simulate failures of this node while publishing cluster state through ZenDiscovery - TransportMasterNodeActionTests.this.clusterService.setState(ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); + setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); Throwable failure = randomBoolean() ? 
new Discovery.FailedToCommitClusterStateException("Fake error") : new NotMasterException("Fake error"); diff --git a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index 5c95e427458..75fc5854984 100644 --- a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -24,17 +24,17 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeActionTests; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -50,12 +50,15 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.Supplier; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; + public class TransportNodesActionTests extends ESTestCase { private static ThreadPool THREAD_POOL; private static ClusterName CLUSTER_NAME = new ClusterName("test-cluster"); - private TestClusterService clusterService; + private ClusterService clusterService; private CapturingTransport transport; private TestTransportNodesAction action; @@ -114,7 +117,7 @@ public class TransportNodesActionTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); transport = new CapturingTransport(); - clusterService = new TestClusterService(THREAD_POOL); + clusterService = createClusterService(THREAD_POOL); final TransportService transportService = new TransportService(transport, THREAD_POOL); transportService.start(); transportService.acceptIncomingRequests(); @@ -138,7 +141,7 @@ public class TransportNodesActionTests extends ESTestCase { ClusterState.Builder stateBuilder = ClusterState.builder(CLUSTER_NAME); stateBuilder.nodes(discoBuilder); ClusterState clusterState = stateBuilder.build(); - clusterService.setState(clusterState); + setState(clusterService, clusterState); action = new TestTransportNodesAction( Settings.EMPTY, THREAD_POOL, @@ -151,6 +154,13 @@ public class TransportNodesActionTests extends ESTestCase { ); } + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + transport.close(); + } + private static DiscoveryNode newNode(int nodeId, Map attributes) { String node = "node_" + nodeId; return new DiscoveryNode(node, node, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java 
b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index 03869974444..4125f02b956 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -31,10 +31,10 @@ import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; @@ -43,10 +43,10 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.local.LocalTransport; +import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -63,6 +63,8 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithAssignedPrimariesAndOneReplica; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithNoShard; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -70,9 +72,8 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; public class BroadcastReplicationTests extends ESTestCase { private static ThreadPool threadPool; - private TestClusterService clusterService; + private ClusterService clusterService; private TransportService transportService; - private LocalTransport transport; private TestBroadcastReplicationAction broadcastReplicationAction; @BeforeClass @@ -84,14 +85,21 @@ public class BroadcastReplicationTests extends ESTestCase { @Before public void setUp() throws Exception { super.setUp(); - transport = new LocalTransport(Settings.EMPTY, threadPool, Version.CURRENT, new NamedWriteableRegistry()); - clusterService = new TestClusterService(threadPool); + LocalTransport transport = new LocalTransport(Settings.EMPTY, threadPool, Version.CURRENT, new NamedWriteableRegistry()); + clusterService = createClusterService(threadPool); transportService = new TransportService(transport, threadPool); transportService.start(); transportService.acceptIncomingRequests(); broadcastReplicationAction = new TestBroadcastReplicationAction(Settings.EMPTY, threadPool, clusterService, transportService, new ActionFilters(new HashSet()), new IndexNameExpressionResolver(Settings.EMPTY), null); } + @After + public void tearDown() throws 
Exception { + super.tearDown(); + clusterService.close(); + transportService.close(); + } + @AfterClass public static void afterClass() { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); @@ -100,7 +108,7 @@ public class BroadcastReplicationTests extends ESTestCase { public void testNotStartedPrimary() throws InterruptedException, ExecutionException, IOException { final String index = "test"; - clusterService.setState(state(index, randomBoolean(), + setState(clusterService, state(index, randomBoolean(), randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); Future response = (broadcastReplicationAction.execute(new DummyBroadcastRequest().indices(index))); @@ -119,7 +127,7 @@ public class BroadcastReplicationTests extends ESTestCase { public void testStartedPrimary() throws InterruptedException, ExecutionException, IOException { final String index = "test"; - clusterService.setState(state(index, randomBoolean(), + setState(clusterService, state(index, randomBoolean(), ShardRoutingState.STARTED)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); Future response = (broadcastReplicationAction.execute(new DummyBroadcastRequest().indices(index))); @@ -135,7 +143,7 @@ public class BroadcastReplicationTests extends ESTestCase { public void testResultCombine() throws InterruptedException, ExecutionException, IOException { final String index = "test"; int numShards = randomInt(3); - clusterService.setState(stateWithAssignedPrimariesAndOneReplica(index, numShards)); + setState(clusterService, stateWithAssignedPrimariesAndOneReplica(index, numShards)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); Future response = (broadcastReplicationAction.execute(new DummyBroadcastRequest().indices(index))); int succeeded = 0; @@ -165,7 +173,7 @@ public class BroadcastReplicationTests extends ESTestCase { } public void testNoShards() throws InterruptedException, ExecutionException, IOException { - clusterService.setState(stateWithNoShard()); + setState(clusterService, stateWithNoShard()); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); BroadcastResponse response = executeAndAssertImmediateResponse(broadcastReplicationAction, new DummyBroadcastRequest()); assertBroadcastResponse(0, 0, 0, response, null); @@ -186,8 +194,8 @@ public class BroadcastReplicationTests extends ESTestCase { protected final Set>> capturedShardRequests = ConcurrentCollections.newConcurrentSet(); public TestBroadcastReplicationAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - TransportReplicationAction replicatedBroadcastShardAction) { + TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + TransportReplicationAction replicatedBroadcastShardAction) { super("test-broadcast-replication-action", DummyBroadcastRequest::new, settings, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, replicatedBroadcastShardAction); } @@ -204,7 +212,7 @@ public class BroadcastReplicationTests extends ESTestCase { @Override protected BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, - List shardFailures) { + List 
shardFailures) { return new BroadcastResponse(totalNumCopies, successfulShards, failedShards, shardFailures); } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 3fc33477746..1fc94dcb533 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -44,6 +43,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; @@ -57,7 +57,6 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESAllocationTestCase; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; @@ -65,6 +64,7 @@ import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseOptions; import org.elasticsearch.transport.TransportService; import org.hamcrest.Matcher; +import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -86,6 +86,8 @@ import java.util.function.Consumer; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.Matchers.arrayWithSize; @@ -102,7 +104,7 @@ public class TransportReplicationActionTests extends ESTestCase { private static ThreadPool threadPool; - private TestClusterService clusterService; + private ClusterService clusterService; private TransportService transportService; private CapturingTransport transport; private Action action; @@ -121,7 +123,7 @@ public class TransportReplicationActionTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); transport = new CapturingTransport(); - clusterService = new TestClusterService(threadPool); + clusterService = createClusterService(threadPool); transportService = new TransportService(transport, threadPool); transportService.start(); 
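+ // note: unlike the removed TestClusterService, the ClusterService returned by createClusterService is started, so it is closed in the tearDown added below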
transportService.acceptIncomingRequests(); @@ -129,6 +131,12 @@ count.set(1); } + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + } + @AfterClass public static void afterClass() { ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); @@ -151,7 +159,7 @@ public class TransportReplicationActionTests extends ESTestCase { ClusterBlocks.Builder block = ClusterBlocks.builder() .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); - clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); reroutePhase.run(); assertListenerThrows("primary phase should fail operation", listener, ClusterBlockException.class); @@ -159,7 +167,7 @@ block = ClusterBlocks.builder() .addGlobalBlock(new ClusterBlock(1, "retryable", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); - clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); listener = new PlainActionFuture<>(); reroutePhase = action.new ReroutePhase(task, new Request().timeout("5ms"), listener); reroutePhase.run(); @@ -174,7 +182,7 @@ block = ClusterBlocks.builder() .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); - clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); assertListenerThrows("primary phase should fail operation when moving from a retryable block to a non-retryable one", listener, ClusterBlockException.class); assertIndexShardUninitialized(); } @@ -187,7 +195,7 @@ final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); // no replicas in order to skip the replication part - clusterService.setState(state(index, true, + setState(clusterService, state(index, true, randomBoolean() ?
ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED)); ReplicationTask task = maybeTask(); @@ -207,13 +215,13 @@ assertFalse("unassigned primary didn't cause a retry", listener.isDone()); assertPhase(task, "waiting_for_retry"); - clusterService.setState(state(index, true, ShardRoutingState.STARTED)); + setState(clusterService, state(index, true, ShardRoutingState.STARTED)); logger.debug("--> primary assigned state:\n{}", clusterService.state().prettyPrint()); final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); final List capturedRequests = - transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId); + transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId); assertThat(capturedRequests, notNullValue()); assertThat(capturedRequests.size(), equalTo(1)); assertThat(capturedRequests.get(0).action, equalTo("testAction[p]")); @@ -236,7 +244,7 @@ ClusterState state = state(index, true, ShardRoutingState.RELOCATING); String relocationTargetNode = state.getRoutingTable().shardRoutingTable(shardId).primaryShard().relocatingNodeId(); state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(relocationTargetNode)).build(); - clusterService.setState(state); + setState(clusterService, state); logger.debug("--> relocation ongoing state:\n{}", clusterService.state().prettyPrint()); Request request = new Request(shardId).timeout("1ms").routedBasedOnClusterVersion(clusterService.state().version() + 1); @@ -257,13 +265,13 @@ RoutingAllocation.Result result = allocationService.applyStartedShards(state, Arrays.asList(relocationTarget)); ClusterState updatedState = ClusterState.builder(clusterService.state()).routingResult(result).build(); - clusterService.setState(updatedState); + setState(clusterService, updatedState); logger.debug("--> relocation complete state:\n{}", clusterService.state().prettyPrint()); IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); final List capturedRequests = - transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId); + transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId); assertThat(capturedRequests, notNullValue()); assertThat(capturedRequests.size(), equalTo(1)); assertThat(capturedRequests.get(0).action, equalTo("testAction[p]")); @@ -273,7 +281,7 @@ public void testUnknownIndexOrShardOnReroute() throws InterruptedException { final String index = "test"; // no replicas in order to skip the replication part - clusterService.setState(state(index, true, + setState(clusterService, state(index, true, randomBoolean() ?
ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); Request request = new Request(new ShardId("unknown_index", "_na_", 0)).timeout("1ms"); @@ -296,7 +304,7 @@ public class TransportReplicationActionTests extends ESTestCase { final ShardId shardId = new ShardId(index, "_na_", 0); ReplicationTask task = maybeTask(); - clusterService.setState(stateWithActivePrimary(index, randomBoolean(), 3)); + setState(clusterService, stateWithActivePrimary(index, randomBoolean(), 3)); logger.debug("using state: \n{}", clusterService.state().prettyPrint()); final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); @@ -325,7 +333,7 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); ClusterState state = stateWithActivePrimary(index, true, randomInt(5)); - clusterService.setState(state); + setState(clusterService, state); Request request = new Request(shardId).timeout("1ms"); PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); @@ -365,7 +373,7 @@ public class TransportReplicationActionTests extends ESTestCase { String primaryTargetNodeId = state.getRoutingTable().shardRoutingTable(shardId).primaryShard().relocatingNodeId(); // simulate execution of the primary phase on the relocation target node state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryTargetNodeId)).build(); - clusterService.setState(state); + setState(clusterService, state); Request request = new Request(shardId).timeout("1ms"); PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); @@ -387,7 +395,7 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); // start with no replicas - clusterService.setState(stateWithActivePrimary(index, true, 0)); + setState(clusterService, stateWithActivePrimary(index, true, 0)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); final ClusterState stateWithAddedReplicas = state(index, true, ShardRoutingState.STARTED, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED); ReplicationTask task = maybeTask(); @@ -397,7 +405,7 @@ public class TransportReplicationActionTests extends ESTestCase { protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception { final Tuple operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest); // add replicas after primary operation - ((TestClusterService) clusterService).setState(stateWithAddedReplicas); + setState(clusterService, stateWithAddedReplicas); logger.debug("--> state after primary operation:\n{}", clusterService.state().prettyPrint()); return operationOnPrimary; } @@ -422,7 +430,7 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); // start with a replica - clusterService.setState(state(index, true, ShardRoutingState.STARTED, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED)); + setState(clusterService, state(index, true, ShardRoutingState.STARTED, randomBoolean() ? 
ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); final ClusterState stateWithRelocatingReplica = state(index, true, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING); @@ -431,7 +439,7 @@ public class TransportReplicationActionTests extends ESTestCase { protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception { final Tuple operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest); // set replica to relocating - ((TestClusterService) clusterService).setState(stateWithRelocatingReplica); + setState(clusterService, stateWithRelocatingReplica); logger.debug("--> state after primary operation:\n{}", clusterService.state().prettyPrint()); return operationOnPrimary; } @@ -447,7 +455,7 @@ public class TransportReplicationActionTests extends ESTestCase { ShardRouting relocatingReplicaShard = stateWithRelocatingReplica.getRoutingTable().shardRoutingTable(index, shardId.id()).replicaShards().get(0); Map> capturedRequestsByTargetNode = transport.getCapturedRequestsByTargetNodeAndClear(); assertPhase(task, "replicating"); - for (String node : new String[] {relocatingReplicaShard.currentNodeId(), relocatingReplicaShard.relocatingNodeId()}) { + for (String node : new String[]{relocatingReplicaShard.currentNodeId(), relocatingReplicaShard.relocatingNodeId()}) { List requests = capturedRequestsByTargetNode.get(node); assertThat(requests, notNullValue()); assertThat(requests.size(), equalTo(1)); @@ -458,7 +466,7 @@ public class TransportReplicationActionTests extends ESTestCase { public void testIndexDeletedAfterPrimaryOperation() { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); - clusterService.setState(state(index, true, ShardRoutingState.STARTED, ShardRoutingState.STARTED)); + setState(clusterService, state(index, true, ShardRoutingState.STARTED, ShardRoutingState.STARTED)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); final ClusterState stateWithDeletedIndex = state(index + "_new", true, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING); @@ -467,7 +475,7 @@ public class TransportReplicationActionTests extends ESTestCase { protected Tuple shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception { final Tuple operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest); // delete index after primary op - ((TestClusterService) clusterService).setState(stateWithDeletedIndex); + setState(clusterService, stateWithDeletedIndex); logger.debug("--> state after primary operation:\n{}", clusterService.state().prettyPrint()); return operationOnPrimary; } @@ -519,7 +527,7 @@ public class TransportReplicationActionTests extends ESTestCase { replicaStates[i] = ShardRoutingState.UNASSIGNED; } - clusterService.setState(state(index, true, ShardRoutingState.STARTED, replicaStates)); + setState(clusterService, state(index, true, ShardRoutingState.STARTED, replicaStates)); logger.debug("using consistency level of [{}], assigned shards [{}], total shards [{}]. expecting op to [{}]. using state: \n{}", request.consistencyLevel(), 1 + assignedReplicas, 1 + assignedReplicas + unassignedReplicas, passesWriteConsistency ? 
"succeed" : "retry", clusterService.state().prettyPrint()); @@ -547,7 +555,7 @@ public class TransportReplicationActionTests extends ESTestCase { for (int i = 0; i < replicaStates.length; i++) { replicaStates[i] = ShardRoutingState.STARTED; } - clusterService.setState(state(index, true, ShardRoutingState.STARTED, replicaStates)); + setState(clusterService, state(index, true, ShardRoutingState.STARTED, replicaStates)); listener = new PlainActionFuture<>(); primaryPhase = action.new PrimaryPhase(task, request, createTransportChannel(listener)); primaryPhase.run(); @@ -567,7 +575,7 @@ public class TransportReplicationActionTests extends ESTestCase { // simulate execution of the replication phase on the relocation target node after relocation source was marked as relocated state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build(); } - clusterService.setState(state); + setState(clusterService, state); final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); int assignedReplicas = 0; @@ -602,7 +610,7 @@ public class TransportReplicationActionTests extends ESTestCase { // simulate execution of the primary phase on the relocation target node state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build(); } - clusterService.setState(state); + setState(clusterService, state); final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); int assignedReplicas = 0; @@ -646,7 +654,7 @@ public class TransportReplicationActionTests extends ESTestCase { HashMap nodesSentTo = new HashMap<>(); boolean executeOnReplica = - action.shouldExecuteReplication(clusterService.state().getMetaData().index(shardId.getIndex()).getSettings()); + action.shouldExecuteReplication(clusterService.state().getMetaData().index(shardId.getIndex()).getSettings()); for (CapturingTransport.CapturedRequest capturedRequest : capturedRequests) { // no duplicate requests Request replicationRequest = (Request) capturedRequest.request; @@ -703,7 +711,7 @@ public class TransportReplicationActionTests extends ESTestCase { // get the shard the request was sent to ShardRouting routing = clusterService.state().getRoutingNodes().node(capturedRequest.node.id()).get(request.shardId.id()); // and the shard that was requested to be failed - ShardStateAction.ShardRoutingEntry shardRoutingEntry = (ShardStateAction.ShardRoutingEntry)shardFailedRequest.request; + ShardStateAction.ShardRoutingEntry shardRoutingEntry = (ShardStateAction.ShardRoutingEntry) shardFailedRequest.request; // the shard the request was sent to and the shard to be failed should be the same assertEquals(shardRoutingEntry.getShardRouting(), routing); failures.add(shardFailedRequest); @@ -714,7 +722,7 @@ public class TransportReplicationActionTests extends ESTestCase { CapturingTransport.CapturedRequest currentRequest = shardFailedRequest; for (int retryNumber = 0; retryNumber < numberOfRetries; retryNumber++) { // force a new cluster state to simulate a new master having been elected - clusterService.setState(ClusterState.builder(clusterService.state())); + setState(clusterService, ClusterState.builder(clusterService.state())); transport.handleRemoteError(currentRequest.requestId, new NotMasterException("shard-failed-test")); CapturingTransport.CapturedRequest[] retryRequests = 
transport.getCapturedRequestsAndClear(); assertEquals(1, retryRequests.length); @@ -765,7 +773,7 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); // no replica, we only want to test on primary - clusterService.setState(state(index, true, ShardRoutingState.STARTED)); + setState(clusterService, state(index, true, ShardRoutingState.STARTED)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); Request request = new Request(shardId).timeout("100ms"); PlainActionFuture listener = new PlainActionFuture<>(); @@ -805,7 +813,7 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); // one replica to make sure replication is attempted - clusterService.setState(state(index, true, + setState(clusterService, state(index, true, ShardRoutingState.STARTED, ShardRoutingState.STARTED)); ShardRouting primaryShard = clusterService.state().routingTable().shardRoutingTable(shardId).primaryShard(); indexShardRouting.set(primaryShard); @@ -842,7 +850,7 @@ public class TransportReplicationActionTests extends ESTestCase { public void testReplicasCounter() throws Exception { final ShardId shardId = new ShardId("test", "_na_", 0); - clusterService.setState(state(shardId.getIndexName(), true, + setState(clusterService, state(shardId.getIndexName(), true, ShardRoutingState.STARTED, ShardRoutingState.STARTED)); action = new ActionWithDelay(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool); final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler(); @@ -881,7 +889,7 @@ public class TransportReplicationActionTests extends ESTestCase { action = new ActionWithExceptions(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool); final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); - clusterService.setState(state(index, true, + setState(clusterService, state(index, true, ShardRoutingState.STARTED, ShardRoutingState.STARTED)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); Request request = new Request(shardId).timeout("100ms"); @@ -901,7 +909,7 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); boolean localPrimary = true; - clusterService.setState(state(index, localPrimary, + setState(clusterService, state(index, localPrimary, ShardRoutingState.STARTED, ShardRoutingState.STARTED)); Action action = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { @Override @@ -953,8 +961,8 @@ public class TransportReplicationActionTests extends ESTestCase { // publish a new cluster state boolean localPrimaryOnRetry = randomBoolean(); - clusterService.setState(state(index, localPrimaryOnRetry, - ShardRoutingState.STARTED, ShardRoutingState.STARTED)); + setState(clusterService, state(index, localPrimaryOnRetry, + ShardRoutingState.STARTED, ShardRoutingState.STARTED)); CapturingTransport.CapturedRequest[] primaryRetry = transport.getCapturedRequestsAndClear(); // the request should be retried @@ -1065,7 +1073,7 @@ public class TransportReplicationActionTests extends ESTestCase { ClusterService clusterService, ThreadPool threadPool) { super(settings, actionName, 
transportService, clusterService, null, threadPool, - new ShardStateAction(settings, clusterService, transportService, null, null, threadPool), null, + new ShardStateAction(settings, clusterService, transportService, null, null, threadPool), new ActionFilters(new HashSet()), new IndexNameExpressionResolver(Settings.EMPTY), Request::new, Request::new, ThreadPool.Names.SAME); } @@ -1190,7 +1198,8 @@ public class TransportReplicationActionTests extends ESTestCase { * Transport channel that is needed for replica operation testing. */ public TransportChannel createTransportChannel(final PlainActionFuture listener) { - return createTransportChannel(listener, error -> {}); + return createTransportChannel(listener, error -> { + }); } public TransportChannel createTransportChannel(final PlainActionFuture listener, Consumer consumer) { diff --git a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java index cf7b6745c8e..2dd31548cb9 100644 --- a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -36,17 +36,18 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportService; +import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -59,13 +60,15 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.function.Supplier; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.core.IsEqual.equalTo; public class TransportInstanceSingleOperationActionTests extends ESTestCase { private static ThreadPool THREAD_POOL; - private TestClusterService clusterService; + private ClusterService clusterService; private CapturingTransport transport; private TransportService transportService; @@ -113,7 +116,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { @Override protected ShardIterator shards(ClusterState clusterState, Request request) { - return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId).primaryShardIt(); + return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId.getId()).primaryShardIt(); } } @@ -123,7 +126,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { } @Override - public String[] 
concreteIndices(ClusterState state, IndicesRequest request) { + public String[] concreteIndexNames(ClusterState state, IndicesRequest request) { return request.indices(); } } @@ -137,7 +140,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); transport = new CapturingTransport(); - clusterService = new TestClusterService(THREAD_POOL); + clusterService = createClusterService(THREAD_POOL); transportService = new TransportService(transport, THREAD_POOL); transportService.start(); transportService.acceptIncomingRequests(); @@ -151,6 +154,13 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { ); } + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + transportService.close(); + } + @AfterClass public static void destroyThreadPool() { ThreadPool.terminate(THREAD_POOL, 30, TimeUnit.SECONDS); @@ -163,7 +173,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { PlainActionFuture listener = new PlainActionFuture<>(); ClusterBlocks.Builder block = ClusterBlocks.builder() .addGlobalBlock(new ClusterBlock(1, "", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); - clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); try { action.new AsyncSingleAction(request, listener).start(); listener.get(); @@ -178,9 +188,9 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testBasicRequestWorks() throws InterruptedException, ExecutionException, TimeoutException { Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); - clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); + setState(clusterService, ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); action.new AsyncSingleAction(request, listener).start(); assertThat(transport.capturedRequests().length, equalTo(1)); transport.handleResponse(transport.capturedRequests()[0].requestId, new Response()); @@ -189,9 +199,9 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testFailureWithoutRetry() throws Exception { Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); - clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); + setState(clusterService, ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); action.new AsyncSingleAction(request, listener).start(); assertThat(transport.capturedRequests().length, equalTo(1)); @@ -215,14 +225,14 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testSuccessAfterRetryWithClusterStateUpdate() throws Exception { Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); boolean local = randomBoolean(); - clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.INITIALIZING)); + setState(clusterService, 
ClusterStateCreationUtils.state("test", local, ShardRoutingState.INITIALIZING)); action.new AsyncSingleAction(request, listener).start(); // this should fail because primary not initialized assertThat(transport.capturedRequests().length, equalTo(0)); - clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); + setState(clusterService, ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); // this time it should work assertThat(transport.capturedRequests().length, equalTo(1)); transport.handleResponse(transport.capturedRequests()[0].requestId, new Response()); @@ -231,10 +241,10 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testSuccessAfterRetryWithExceptionFromTransport() throws Exception { Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); boolean local = randomBoolean(); - clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); + setState(clusterService, ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); action.new AsyncSingleAction(request, listener).start(); assertThat(transport.capturedRequests().length, equalTo(1)); long requestId = transport.capturedRequests()[0].requestId; @@ -242,7 +252,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { DiscoveryNode node = clusterService.state().getNodes().getLocalNode(); transport.handleLocalError(requestId, new ConnectTransportException(node, "test exception")); // trigger cluster state observer - clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); + setState(clusterService, ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); assertThat(transport.capturedRequests().length, equalTo(1)); transport.handleResponse(transport.capturedRequests()[0].requestId, new Response()); listener.get(); @@ -250,9 +260,9 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testRetryOfAnAlreadyTimedOutRequest() throws Exception { Request request = new Request().index("test").timeout(new TimeValue(0, TimeUnit.MILLISECONDS)); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); - clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); + setState(clusterService, ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); action.new AsyncSingleAction(request, listener).start(); assertThat(transport.capturedRequests().length, equalTo(1)); long requestId = transport.capturedRequests()[0].requestId; @@ -299,9 +309,9 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { } }; Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); - clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); + setState(clusterService, ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); action.new AsyncSingleAction(request, listener).start(); assertThat(transport.capturedRequests().length, equalTo(0)); try { diff --git 
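For the recurring request.shardId hunks in the file above: the field's type changes from a bare int to a full ShardId carrying the index name and UUID next to the shard number, which is why call sites now go through getId(). A small illustrative sketch, reusing the "_na_" UUID placeholder from these tests:

import org.elasticsearch.index.shard.ShardId;

public class ShardIdExample { // illustrative only
    public static void main(String[] args) {
        // "_na_" is the placeholder used when the real index UUID is irrelevant
        ShardId shardId = new ShardId("test", "_na_", 0);
        // the routing table is now addressed via getId() rather than the raw int
        System.out.println(shardId.getIndexName() + " -> shard " + shardId.getId());
    }
}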
a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index bcb26613388..d105a4bf63b 100644 --- a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.update; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -36,6 +37,7 @@ import org.elasticsearch.test.ESTestCase; import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -179,4 +181,17 @@ public class UpdateRequestTests extends ESTestCase { assertThat(e.getMessage(), equalTo("Failed to derive xcontent")); } } + + // Related to issue 15338 + public void testFieldsParsing() throws Exception { + UpdateRequest request = new UpdateRequest("test", "type1", "1") + .source(new BytesArray("{\"doc\": {\"field1\": \"value1\"}, \"fields\": \"_source\"}")); + assertThat(request.doc().sourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(request.fields(), arrayContaining("_source")); + + request = new UpdateRequest("test", "type2", "2") + .source(new BytesArray("{\"doc\": {\"field2\": \"value2\"}, \"fields\": [\"field1\", \"field2\"]}")); + assertThat(request.doc().sourceAsMap().get("field2").toString(), equalTo("value2")); + assertThat(request.fields(), arrayContaining("field1", "field2")); + } } diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java index 45986eab00e..3c269c39004 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java @@ -131,7 +131,7 @@ public class BootstrapCheckTests extends ESTestCase { } public void testMaxNumberOfThreadsCheck() { - final int limit = 1 << 15; + final int limit = 1 << 11; final AtomicLong maxNumberOfThreads = new AtomicLong(randomIntBetween(1, limit - 1)); final BootstrapCheck.MaxNumberOfThreadsCheck check = new BootstrapCheck.MaxNumberOfThreadsCheck() { @Override diff --git a/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java b/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java new file mode 100644 index 00000000000..51274af9a01 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java @@ -0,0 +1,191 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
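The new testFieldsParsing above (issue 15338) pins down that the fields entry in an update request source may be either a single string or an array, and that both the partial doc and the fields survive parsing. A condensed, illustrative view of the API under test:

import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.bytes.BytesArray;

public class UpdateFieldsExample { // illustrative only
    public static void main(String[] args) throws Exception {
        // single-string form
        UpdateRequest single = new UpdateRequest("test", "type1", "1")
                .source(new BytesArray("{\"doc\": {\"field1\": \"value1\"}, \"fields\": \"_source\"}"));
        // array form
        UpdateRequest multi = new UpdateRequest("test", "type2", "2")
                .source(new BytesArray("{\"doc\": {\"field2\": \"value2\"}, \"fields\": [\"field1\", \"field2\"]}"));
        // both the partial doc and the requested fields are available after parsing
        System.out.println(single.fields()[0]);    // _source
        System.out.println(multi.fields().length); // 2
    }
}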
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.bootstrap; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; + +public class ElasticsearchCliTests extends ESTestCase { + + public void testVersion() throws Exception { + runTestThatVersionIsMutuallyExclusiveToOtherOptions("-V", "-d"); + runTestThatVersionIsMutuallyExclusiveToOtherOptions("-V", "--daemonize"); + runTestThatVersionIsMutuallyExclusiveToOtherOptions("-V", "-p", "/tmp/pid"); + runTestThatVersionIsMutuallyExclusiveToOtherOptions("-V", "--pidfile", "/tmp/pid"); + runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "-d"); + runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "--daemonize"); + runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "-p", "/tmp/pid"); + runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "--pidfile", "/tmp/pid"); + runTestThatVersionIsReturned("-V"); + runTestThatVersionIsReturned("--version"); + } + + private void runTestThatVersionIsMutuallyExclusiveToOtherOptions(String... args) throws Exception { + runTestVersion( + ExitCodes.USAGE, + output -> assertThat( + output, + containsString("ERROR: Elasticsearch version option is mutually exclusive with any other option")), + args); + } + + private void runTestThatVersionIsReturned(String... args) throws Exception { + runTestVersion(ExitCodes.OK, output -> { + assertThat(output, containsString("Version: " + Version.CURRENT.toString())); + assertThat(output, containsString("Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date())); + assertThat(output, containsString("JVM: " + JvmInfo.jvmInfo().version())); + }, args); + } + + private void runTestVersion(int expectedStatus, Consumer outputConsumer, String... args) throws Exception { + runTest(expectedStatus, false, outputConsumer, (foreground, pidFile, esSettings) -> {}, args); + } + + public void testThatPidFileCanBeConfigured() throws Exception { + runPidFileTest(ExitCodes.USAGE, false, output -> assertThat(output, containsString("Option p/pidfile requires an argument")), "-p"); + runPidFileTest(ExitCodes.OK, true, output -> {}, "-p", "/tmp/pid"); + runPidFileTest(ExitCodes.OK, true, output -> {}, "--pidfile", "/tmp/pid"); + } + + private void runPidFileTest(final int expectedStatus, final boolean expectedInit, Consumer outputConsumer, final String... 
args) + throws Exception { + runTest( + expectedStatus, + expectedInit, + outputConsumer, + (foreground, pidFile, esSettings) -> assertThat(pidFile, equalTo("/tmp/pid")), + args); + } + + public void testThatParsingDaemonizeWorks() throws Exception { + runDaemonizeTest(true, "-d"); + runDaemonizeTest(true, "--daemonize"); + runDaemonizeTest(false); + } + + private void runDaemonizeTest(final boolean expectedDaemonize, final String... args) throws Exception { + runTest( + ExitCodes.OK, + true, + output -> {}, + (foreground, pidFile, esSettings) -> assertThat(foreground, equalTo(!expectedDaemonize)), + args); + } + + public void testElasticsearchSettings() throws Exception { + runTest( + ExitCodes.OK, + true, + output -> {}, + (foreground, pidFile, esSettings) -> { + assertThat(esSettings.size(), equalTo(2)); + assertThat(esSettings, hasEntry("es.foo", "bar")); + assertThat(esSettings, hasEntry("es.baz", "qux")); + }, + "-Ees.foo=bar", "-E", "es.baz=qux" + ); + } + + public void testElasticsearchSettingPrefix() throws Exception { + runElasticsearchSettingPrefixTest("-E", "foo"); + runElasticsearchSettingPrefixTest("-E", "foo=bar"); + runElasticsearchSettingPrefixTest("-E", "=bar"); + } + + private void runElasticsearchSettingPrefixTest(String... args) throws Exception { + runTest( + ExitCodes.USAGE, + false, + output -> assertThat(output, containsString("Elasticsearch settings must be prefixed with [es.] but was [")), + (foreground, pidFile, esSettings) -> {}, + args + ); + } + + public void testElasticsearchSettingCanNotBeEmpty() throws Exception { + runTest( + ExitCodes.USAGE, + false, + output -> assertThat(output, containsString("Elasticsearch setting [es.foo] must not be empty")), + (foreground, pidFile, esSettings) -> {}, + "-E", "es.foo=" + ); + } + + public void testUnknownOption() throws Exception { + runTest( + ExitCodes.USAGE, + false, + output -> assertThat(output, containsString("network.host is not a recognized option")), + (foreground, pidFile, esSettings) -> {}, + "--network.host"); + } + + private interface InitConsumer { + void accept(final boolean foreground, final String pidFile, final Map esSettings); + } + + private void runTest( + final int expectedStatus, + final boolean expectedInit, + final Consumer outputConsumer, + final InitConsumer initConsumer, + String... 
args) throws Exception { + final MockTerminal terminal = new MockTerminal(); + try { + final AtomicBoolean init = new AtomicBoolean(); + final int status = Elasticsearch.main(args, new Elasticsearch() { + @Override + void init(final boolean daemonize, final String pidFile, final Map esSettings) { + init.set(true); + initConsumer.accept(!daemonize, pidFile, esSettings); + } + }, terminal); + assertThat(status, equalTo(expectedStatus)); + assertThat(init.get(), equalTo(expectedInit)); + outputConsumer.accept(terminal.getOutput()); + } catch (Throwable t) { + // if an unexpected exception is thrown, we log + // terminal output to aid debugging + logger.info(terminal.getOutput()); + // rethrow so the test fails + throw t; + } + } + +} diff --git a/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityIT.java index 9a0316050b1..9ea9b340c20 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityIT.java @@ -104,17 +104,8 @@ public class BasicAnalysisBackwardCompatibilityIT extends ESBackcompatTestCase { } private String randomAnalyzer() { - while(true) { - PreBuiltAnalyzers preBuiltAnalyzers = RandomPicks.randomFrom(getRandom(), PreBuiltAnalyzers.values()); - if (preBuiltAnalyzers == PreBuiltAnalyzers.SORANI && compatibilityVersion().before(Version.V_1_3_0)) { - continue; // SORANI was added in 1.3.0 - } - if (preBuiltAnalyzers == PreBuiltAnalyzers.LITHUANIAN && compatibilityVersion().before(Version.V_2_1_0)) { - continue; // LITHUANIAN was added in 2.1.0 - } - return preBuiltAnalyzers.name().toLowerCase(Locale.ROOT); - } - + PreBuiltAnalyzers preBuiltAnalyzers = RandomPicks.randomFrom(getRandom(), PreBuiltAnalyzers.values()); + return preBuiltAnalyzers.name().toLowerCase(Locale.ROOT); } private static final class InputOutput { @@ -127,7 +118,5 @@ public class BasicAnalysisBackwardCompatibilityIT extends ESBackcompatTestCase { this.input = input; this.field = field; } - - } } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java index 5b7c4fa37ba..ae739701593 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java @@ -188,14 +188,10 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase { docs[i] = client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(numDocs + i)); } indexRandom(true, docs); - if (compatibilityVersion().before(Version.V_1_3_0)) { - // issue another refresh through a new node to side step issue #6545 - assertNoFailures(backwardsCluster().internalCluster().dataNodeClient().admin().indices().prepareRefresh().setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get()); - } numDocs *= 2; } - logger.info(" --> waiting for relocation to complete", numDocs); + logger.info(" --> waiting for relocation of [{}] docs to complete", numDocs); ensureYellow("test");// move all shards to the new node (it waits on relocation) final int numIters = randomIntBetween(10, 20); for (int i = 0; i < numIters; i++) { diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java 
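Most logging hunks from here on share one motivation: string concatenation formats eagerly, and, as in the relocation message fixed above, an argument without a matching {} placeholder is silently dropped. A minimal illustration of the broken and fixed forms, assuming the ESLogger/Loggers API used in these files:

import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

public class LoggingStyleExample { // illustrative only
    private static final ESLogger logger = Loggers.getLogger(LoggingStyleExample.class);

    public static void main(String[] args) {
        long numDocs = 42;
        // buggy: no placeholder, so numDocs never appears in the output
        logger.info(" --> waiting for relocation to complete", numDocs);
        // fixed: placeholder and argument stay paired, and formatting is deferred
        logger.info(" --> waiting for relocation of [{}] docs to complete", numDocs);
    }
}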
b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index b13cee98565..784813c3b4e 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -41,6 +41,7 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.IndexFolderUpgrader; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; @@ -105,6 +106,8 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { List indexes; List unsupportedIndexes; + static String singleDataPathNodeName; + static String multiDataPathNodeName; static Path singleDataPath; static Path[] multiDataPath; @@ -127,6 +130,8 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { @AfterClass public static void tearDownStatics() { + singleDataPathNodeName = null; + multiDataPathNodeName = null; singleDataPath = null; multiDataPath = null; } @@ -157,15 +162,17 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { InternalTestCluster.Async multiDataPathNode = internalCluster().startNodeAsync(nodeSettings.build()); // find single data path dir - Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, singleDataPathNode.get()).nodeDataPaths(); + singleDataPathNodeName = singleDataPathNode.get(); + Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, singleDataPathNodeName).nodeDataPaths(); assertEquals(1, nodePaths.length); singleDataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER); assertFalse(Files.exists(singleDataPath)); Files.createDirectories(singleDataPath); - logger.info("--> Single data path: " + singleDataPath.toString()); + logger.info("--> Single data path: {}", singleDataPath); // find multi data path dirs - nodePaths = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNode.get()).nodeDataPaths(); + multiDataPathNodeName = multiDataPathNode.get(); + nodePaths = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNodeName).nodeDataPaths(); assertEquals(2, nodePaths.length); multiDataPath = new Path[] {nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER), nodePaths[1].resolve(NodeEnvironment.INDICES_FOLDER)}; @@ -173,11 +180,18 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { assertFalse(Files.exists(multiDataPath[1])); Files.createDirectories(multiDataPath[0]); Files.createDirectories(multiDataPath[1]); - logger.info("--> Multi data paths: " + multiDataPath[0].toString() + ", " + multiDataPath[1].toString()); + logger.info("--> Multi data paths: {}, {}", multiDataPath[0], multiDataPath[1]); replicas.get(); // wait for replicas } + void upgradeIndexFolder() throws Exception { + final NodeEnvironment nodeEnvironment = internalCluster().getInstance(NodeEnvironment.class, singleDataPathNodeName); + IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnvironment); + final NodeEnvironment nodeEnv = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNodeName); + IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnv); + } + String loadIndex(String indexFile) throws Exception { Path unzipDir = createTempDir(); Path 
unzipDataDir = unzipDir.resolve("data"); @@ -239,13 +253,13 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { if (file.getFileName().toString().equals(IndexWriter.WRITE_LOCK_NAME)) { // skip lock file, we don't need it - logger.trace("Skipping lock file: " + file.toString()); + logger.trace("Skipping lock file: {}", file); return FileVisitResult.CONTINUE; } Path relativeFile = src.relativize(file); Path destFile = destinationDataPath.resolve(indexName).resolve(relativeFile); - logger.trace("--> Moving " + relativeFile.toString() + " to " + destFile.toString()); + logger.trace("--> Moving {} to {}", relativeFile, destFile); Files.move(file, destFile); assertFalse(Files.exists(file)); assertTrue(Files.exists(destFile)); @@ -269,7 +283,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { for (String index : indexes) { if (expectedVersions.remove(index) == false) { - logger.warn("Old indexes tests contain extra index: " + index); + logger.warn("Old indexes tests contain extra index: {}", index); } } if (expectedVersions.isEmpty() == false) { @@ -287,15 +301,19 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { Collections.shuffle(indexes, random()); for (String index : indexes) { long startTime = System.currentTimeMillis(); - logger.info("--> Testing old index " + index); + logger.info("--> Testing old index {}", index); assertOldIndexWorks(index); - logger.info("--> Done testing " + index + ", took " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds"); + logger.info("--> Done testing {}, took {} seconds", index, (System.currentTimeMillis() - startTime) / 1000.0); } } void assertOldIndexWorks(String index) throws Exception { Version version = extractVersion(index); String indexName = loadIndex(index); + // we explicitly upgrade the index folders as these indices + // are imported as dangling indices and not available on + // node startup + upgradeIndexFolder(); importIndex(indexName); assertIndexSanity(indexName, version); assertBasicSearchWorks(indexName); @@ -344,7 +362,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { SearchResponse searchRsp = searchReq.get(); ElasticsearchAssertions.assertNoFailures(searchRsp); long numDocs = searchRsp.getHits().getTotalHits(); - logger.info("Found " + numDocs + " in old index"); + logger.info("Found {} in old index", numDocs); logger.info("--> testing basic search with sort"); searchReq.addSort("long_sort", SortOrder.ASC); @@ -446,7 +464,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { // #10067: create-bwc-index.py deleted any doc with long_sort:[10-20] void assertDeleteByQueryWorked(String indexName, Version version) throws Exception { - if (version.onOrBefore(Version.V_1_0_0_Beta2) || version.onOrAfter(Version.V_2_0_0_beta1)) { + if (version.onOrAfter(Version.V_2_0_0_beta1)) { // TODO: remove this once #10262 is fixed return; } @@ -523,7 +541,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { for (String indexFile : indexes) { String indexName = indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-"); Path nodeDir = getNodeDir(indexFile); - logger.info("Parsing cluster state files from index [" + indexName + "]"); + logger.info("Parsing cluster state files from index [{}]", indexName); assertNotNull(globalFormat.loadLatestState(logger, nodeDir)); // no exception 
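The explicit upgrade step added to assertOldIndexWorks above reduces to one call per NodeEnvironment whose data the test imported. A minimal sketch, assuming upgradeIndicesIfNeeded declares IOException:

import java.io.IOException;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.IndexFolderUpgrader;
import org.elasticsearch.env.NodeEnvironment;

public class IndexFolderUpgradeExample { // illustrative only
    // Renames index folders still keyed by index name to the newer layout so
    // the dangling-index importer can find them; assumed to be a no-op when
    // nothing needs upgrading.
    static void upgradeAllIndexFolders(NodeEnvironment nodeEnvironment) throws IOException {
        IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnvironment);
    }
}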
Path indexDir = nodeDir.resolve("indices").resolve(indexName); assertNotNull(indexFormat.loadLatestState(logger, indexDir)); // no exception diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java index 23163b86112..9fe83f65c45 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java @@ -28,7 +28,7 @@ public class RecoveryWithUnsupportedIndicesIT extends StaticIndexBackwardCompati public void testUpgradeStartClusterOn_0_20_6() throws Exception { String indexName = "unsupported-0.20.6"; - logger.info("Checking static index " + indexName); + logger.info("Checking static index {}", indexName); Settings nodeSettings = prepareBackwardsDataDir(getBwcIndicesPath().resolve(indexName + ".zip"), NetworkModule.HTTP_ENABLED.getKey(), true); try { internalCluster().startNode(nodeSettings); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java index ec73edd493f..5b81621e6dd 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java @@ -108,7 +108,7 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase { for (String repoVersion : repoVersions) { if (expectedVersions.remove(repoVersion) == false) { - logger.warn("Old repositories tests contain extra repo: " + repoVersion); + logger.warn("Old repositories tests contain extra repo: {}", repoVersion); } } if (expectedVersions.isEmpty() == false) { @@ -194,14 +194,11 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase { assertThat(template.settings().getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1), equalTo(1)); assertThat(template.mappings().size(), equalTo(1)); assertThat(template.mappings().get("type1").string(), equalTo("{\"type1\":{\"_source\":{\"enabled\":false}}}")); - if (Version.fromString(version).onOrAfter(Version.V_1_1_0)) { - // Support for aliases in templates was added in v1.1.0 - assertThat(template.aliases().size(), equalTo(3)); - assertThat(template.aliases().get("alias1"), notNullValue()); - assertThat(template.aliases().get("alias2").filter().string(), containsString(version)); - assertThat(template.aliases().get("alias2").indexRouting(), equalTo("kimchy")); - assertThat(template.aliases().get("{index}-alias"), notNullValue()); - } + assertThat(template.aliases().size(), equalTo(3)); + assertThat(template.aliases().get("alias1"), notNullValue()); + assertThat(template.aliases().get("alias2").filter().string(), containsString(version)); + assertThat(template.aliases().get("alias2").indexRouting(), equalTo("kimchy")); + assertThat(template.aliases().get("{index}-alias"), notNullValue()); logger.info("--> cleanup"); cluster().wipeIndices(restoreInfo.indices().toArray(new String[restoreInfo.indices().size()])); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java index 794aea85487..3884d3475e1 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityIT.java @@ 
-36,7 +36,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class StaticIndexBackwardCompatibilityIT extends ESIntegTestCase { public void loadIndex(String index, Object... settings) throws Exception { - logger.info("Checking static index " + index); + logger.info("Checking static index {}", index); Settings nodeSettings = prepareBackwardsDataDir(getDataPath(index + ".zip"), settings); internalCluster().startNode(nodeSettings); ensureGreen(index); diff --git a/core/src/test/java/org/elasticsearch/cli/CommandTests.java b/core/src/test/java/org/elasticsearch/cli/CommandTests.java new file mode 100644 index 00000000000..153bd4600b9 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cli/CommandTests.java @@ -0,0 +1,123 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +import joptsimple.OptionSet; +import org.elasticsearch.test.ESTestCase; + +public class CommandTests extends ESTestCase { + + static class UserErrorCommand extends Command { + UserErrorCommand() { + super("Throws a user error"); + } + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + throw new UserError(ExitCodes.DATA_ERROR, "Bad input"); + } + } + + static class NoopCommand extends Command { + boolean executed = false; + NoopCommand() { + super("Does nothing"); + } + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + terminal.println("Normal output"); + terminal.println(Terminal.Verbosity.SILENT, "Silent output"); + terminal.println(Terminal.Verbosity.VERBOSE, "Verbose output"); + executed = true; + } + @Override + protected void printAdditionalHelp(Terminal terminal) { + terminal.println("Some extra help"); + } + } + + public void testHelp() throws Exception { + NoopCommand command = new NoopCommand(); + MockTerminal terminal = new MockTerminal(); + String[] args = {"-h"}; + int status = command.main(args, terminal); + String output = terminal.getOutput(); + assertEquals(output, ExitCodes.OK, status); + assertTrue(output, output.contains("Does nothing")); + assertTrue(output, output.contains("Some extra help")); + assertFalse(command.executed); + + command = new NoopCommand(); + String[] args2 = {"--help"}; + status = command.main(args2, terminal); + output = terminal.getOutput(); + assertEquals(output, ExitCodes.OK, status); + assertTrue(output, output.contains("Does nothing")); + assertTrue(output, output.contains("Some extra help")); + assertFalse(command.executed); + } + + public void testVerbositySilentAndVerbose() throws Exception { + MockTerminal terminal = new MockTerminal(); + NoopCommand command = new NoopCommand(); + String[] args = {"-v", "-s"}; + UserError e = expectThrows(UserError.class, () -> { + 
command.mainWithoutErrorHandling(args, terminal); + }); + assertTrue(e.getMessage(), e.getMessage().contains("Cannot specify -s and -v together")); + } + + public void testSilentVerbosity() throws Exception { + MockTerminal terminal = new MockTerminal(); + NoopCommand command = new NoopCommand(); + String[] args = {"-s"}; + command.main(args, terminal); + String output = terminal.getOutput(); + assertTrue(output, output.contains("Silent output")); + } + + public void testNormalVerbosity() throws Exception { + MockTerminal terminal = new MockTerminal(); + terminal.setVerbosity(Terminal.Verbosity.SILENT); + NoopCommand command = new NoopCommand(); + String[] args = {}; + command.main(args, terminal); + String output = terminal.getOutput(); + assertTrue(output, output.contains("Normal output")); + } + + public void testVerboseVerbosity() throws Exception { + MockTerminal terminal = new MockTerminal(); + NoopCommand command = new NoopCommand(); + String[] args = {"-v"}; + command.main(args, terminal); + String output = terminal.getOutput(); + assertTrue(output, output.contains("Verbose output")); + } + + public void testUserError() throws Exception { + MockTerminal terminal = new MockTerminal(); + UserErrorCommand command = new UserErrorCommand(); + String[] args = {}; + int status = command.main(args, terminal); + String output = terminal.getOutput(); + assertEquals(output, ExitCodes.DATA_ERROR, status); + assertTrue(output, output.contains("ERROR: Bad input")); + } +} diff --git a/core/src/test/java/org/elasticsearch/cli/MultiCommandTests.java b/core/src/test/java/org/elasticsearch/cli/MultiCommandTests.java new file mode 100644 index 00000000000..4f91d378440 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cli/MultiCommandTests.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
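The two entry points exercised by CommandTests above differ only in error handling: main catches UserError and turns it into an exit status plus an "ERROR: ..." line, while mainWithoutErrorHandling lets the exception escape so tests can use expectThrows. An illustrative sketch; SampleCommand is hypothetical:

import joptsimple.OptionSet;
import org.elasticsearch.cli.Command;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.MockTerminal;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserError;

public class SampleCommandCheck { // illustrative only

    static class SampleCommand extends Command {
        SampleCommand() {
            super("Fails with a user error");
        }
        @Override
        protected void execute(Terminal terminal, OptionSet options) throws Exception {
            throw new UserError(ExitCodes.USAGE, "bad arguments");
        }
    }

    public static void main(String[] args) throws Exception {
        MockTerminal terminal = new MockTerminal();
        // main(...) converts the UserError into an exit status plus "ERROR: ..." output
        int status = new SampleCommand().main(new String[0], terminal);
        System.out.println(status == ExitCodes.USAGE); // true
        System.out.println(terminal.getOutput().contains("ERROR: bad arguments"));
    }
}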
+ */ + +package org.elasticsearch.cli; + +import joptsimple.OptionSet; +import org.junit.Before; + +public class MultiCommandTests extends CommandTestCase { + + static class DummyMultiCommand extends MultiCommand { + DummyMultiCommand() { + super("A dummy multi command"); + } + } + + static class DummySubCommand extends Command { + DummySubCommand() { + super("A dummy subcommand"); + } + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + terminal.println("Arguments: " + options.nonOptionArguments().toString()); + } + } + + DummyMultiCommand multiCommand; + + @Before + public void setupCommand() { + multiCommand = new DummyMultiCommand(); + } + + @Override + protected Command newCommand() { + return multiCommand; + } + + public void testNoCommandsConfigured() throws Exception { + IllegalStateException e = expectThrows(IllegalStateException.class, () -> { + execute(); + }); + assertEquals("No subcommands configured", e.getMessage()); + } + + public void testUnknownCommand() throws Exception { + multiCommand.subcommands.put("something", new DummySubCommand()); + UserError e = expectThrows(UserError.class, () -> { + execute("somethingelse"); + }); + assertEquals(ExitCodes.USAGE, e.exitCode); + assertEquals("Unknown command [somethingelse]", e.getMessage()); + } + + public void testMissingCommand() throws Exception { + multiCommand.subcommands.put("command1", new DummySubCommand()); + UserError e = expectThrows(UserError.class, () -> { + execute(); + }); + assertEquals(ExitCodes.USAGE, e.exitCode); + assertEquals("Missing command", e.getMessage()); + } + + public void testHelp() throws Exception { + multiCommand.subcommands.put("command1", new DummySubCommand()); + multiCommand.subcommands.put("command2", new DummySubCommand()); + execute("-h"); + String output = terminal.getOutput(); + assertTrue(output, output.contains("command1")); + assertTrue(output, output.contains("command2")); + } + + public void testSubcommandHelp() throws Exception { + multiCommand.subcommands.put("command1", new DummySubCommand()); + multiCommand.subcommands.put("command2", new DummySubCommand()); + execute("command2", "-h"); + String output = terminal.getOutput(); + assertFalse(output, output.contains("command1")); + assertTrue(output, output.contains("A dummy subcommand")); + } + + public void testSubcommandArguments() throws Exception { + multiCommand.subcommands.put("command1", new DummySubCommand()); + execute("command1", "foo", "bar"); + String output = terminal.getOutput(); + assertFalse(output, output.contains("command1")); + assertTrue(output, output.contains("Arguments: [foo, bar]")); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java b/core/src/test/java/org/elasticsearch/cli/TerminalTests.java similarity index 63% rename from core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java rename to core/src/test/java/org/elasticsearch/cli/TerminalTests.java index 0e71ac7cd6a..6673bdbc858 100644 --- a/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java +++ b/core/src/test/java/org/elasticsearch/cli/TerminalTests.java @@ -17,43 +17,45 @@ * under the License. 
*/ -package org.elasticsearch.common.cli; +package org.elasticsearch.cli; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.hasSize; +import org.elasticsearch.test.ESTestCase; -public class TerminalTests extends CliToolTestCase { +public class TerminalTests extends ESTestCase { public void testVerbosity() throws Exception { - CaptureOutputTerminal terminal = new CaptureOutputTerminal(Terminal.Verbosity.SILENT); + MockTerminal terminal = new MockTerminal(); + terminal.setVerbosity(Terminal.Verbosity.SILENT); assertPrinted(terminal, Terminal.Verbosity.SILENT, "text"); assertNotPrinted(terminal, Terminal.Verbosity.NORMAL, "text"); assertNotPrinted(terminal, Terminal.Verbosity.VERBOSE, "text"); - terminal = new CaptureOutputTerminal(Terminal.Verbosity.NORMAL); + terminal = new MockTerminal(); assertPrinted(terminal, Terminal.Verbosity.SILENT, "text"); assertPrinted(terminal, Terminal.Verbosity.NORMAL, "text"); assertNotPrinted(terminal, Terminal.Verbosity.VERBOSE, "text"); - terminal = new CaptureOutputTerminal(Terminal.Verbosity.VERBOSE); + terminal = new MockTerminal(); + terminal.setVerbosity(Terminal.Verbosity.VERBOSE); assertPrinted(terminal, Terminal.Verbosity.SILENT, "text"); assertPrinted(terminal, Terminal.Verbosity.NORMAL, "text"); assertPrinted(terminal, Terminal.Verbosity.VERBOSE, "text"); } public void testEscaping() throws Exception { - CaptureOutputTerminal terminal = new CaptureOutputTerminal(Terminal.Verbosity.NORMAL); + MockTerminal terminal = new MockTerminal(); assertPrinted(terminal, Terminal.Verbosity.NORMAL, "This message contains percent like %20n"); } - private void assertPrinted(CaptureOutputTerminal logTerminal, Terminal.Verbosity verbosity, String text) { + private void assertPrinted(MockTerminal logTerminal, Terminal.Verbosity verbosity, String text) throws Exception { logTerminal.println(verbosity, text); - assertEquals(1, logTerminal.getTerminalOutput().size()); - assertTrue(logTerminal.getTerminalOutput().get(0).contains(text)); - logTerminal.terminalOutput.clear(); + String output = logTerminal.getOutput(); + assertTrue(output, output.contains(text)); + logTerminal.reset(); } - private void assertNotPrinted(CaptureOutputTerminal logTerminal, Terminal.Verbosity verbosity, String text) { + private void assertNotPrinted(MockTerminal logTerminal, Terminal.Verbosity verbosity, String text) throws Exception { logTerminal.println(verbosity, text); - assertThat(logTerminal.getTerminalOutput(), hasSize(0)); + String output = logTerminal.getOutput(); + assertTrue(output, output.isEmpty()); } } diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java index ebba75b10d8..5d3ac18259d 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java @@ -128,8 +128,8 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase { CountDownLatch clusterStateLatch = new CountDownLatch(1); @Inject - public InternalTransportService(Settings settings, Transport transport, ThreadPool threadPool, NamedWriteableRegistry namedWriteableRegistry) { - super(settings, transport, threadPool, namedWriteableRegistry); + public InternalTransportService(Settings settings, Transport transport, ThreadPool threadPool) { + super(settings, transport, threadPool); } @Override 
@SuppressWarnings("unchecked") diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java index ca83deeef1b..5e67ac42d0e 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java @@ -71,7 +71,7 @@ public class TransportClientNodesServiceTests extends ESTestCase { return new TestResponse(); } }; - transportService = new TransportService(Settings.EMPTY, transport, threadPool, new NamedWriteableRegistry()); + transportService = new TransportService(Settings.EMPTY, transport, threadPool); transportService.start(); transportService.acceptIncomingRequests(); transportClientNodesService = new TransportClientNodesService(Settings.EMPTY, ClusterName.DEFAULT, transportService, threadPool, Version.CURRENT); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java index 18b8685bafb..de0d5df928c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -37,6 +38,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; @@ -220,7 +222,7 @@ public class ClusterChangedEventTests extends ESTestCase { final ClusterState newState = nextState(previousState, changeClusterUUID, addedIndices, delIndices, 0); final ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState); final List addsFromEvent = event.indicesCreated(); - final List delsFromEvent = event.indicesDeleted(); + final List delsFromEvent = event.indicesDeleted().stream().map((s) -> s.getName()).collect(Collectors.toList()); Collections.sort(addsFromEvent); Collections.sort(delsFromEvent); assertThat(addsFromEvent, equalTo(addedIndices)); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index 9a8e8fb7268..28b0b7e18cf 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionModule; @@ -35,6 +34,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import 
org.elasticsearch.common.inject.Inject; diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index 24635a980a7..4a930bc9c28 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.inject.ModuleTestCase; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; @@ -48,19 +49,7 @@ public class ClusterModuleTests extends ModuleTestCase { static class FakeShardsAllocator implements ShardsAllocator { @Override - public void applyStartedShards(StartedRerouteAllocation allocation) {} - @Override - public void applyFailedShards(FailedRerouteAllocation allocation) {} - @Override - public boolean allocateUnassigned(RoutingAllocation allocation) { - return false; - } - @Override - public boolean rebalance(RoutingAllocation allocation) { - return false; - } - @Override - public boolean moveShards(RoutingAllocation allocation) { + public boolean allocate(RoutingAllocation allocation) { return false; } } @@ -83,7 +72,7 @@ public class ClusterModuleTests extends ModuleTestCase { public void testRegisterClusterDynamicSetting() { SettingsModule module = new SettingsModule(Settings.EMPTY); - module.registerSetting(Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER)); + module.registerSetting(Setting.boolSetting("foo.bar", false, Property.Dynamic, Property.NodeScope)); assertInstanceBinding(module, ClusterSettings.class, service -> service.hasDynamicSetting("foo.bar")); } @@ -98,8 +87,8 @@ public class ClusterModuleTests extends ModuleTestCase { public void testRegisterIndexDynamicSetting() { SettingsModule module = new SettingsModule(Settings.EMPTY); - module.registerSetting(Setting.boolSetting("foo.bar", false, true, Setting.Scope.INDEX)); - assertInstanceBinding(module, IndexScopedSettings.class, service -> service.hasDynamicSetting("foo.bar")); + module.registerSetting(Setting.boolSetting("index.foo.bar", false, Property.Dynamic, Property.IndexScope)); + assertInstanceBinding(module, IndexScopedSettings.class, service -> service.hasDynamicSetting("index.foo.bar")); } public void testRegisterAllocationDeciderDuplicate() { diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java deleted file mode 100644 index 017a46a5462..00000000000 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java +++ /dev/null @@ -1,1351 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.cluster; - -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.InternalClusterService; -import org.elasticsearch.cluster.service.PendingClusterTask; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.component.LifecycleComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.Singleton; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.node.Node; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.MockLogAppender; -import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; - -/** - * - */ -@ClusterScope(scope = Scope.TEST, numDataNodes = 0) -@ESIntegTestCase.SuppressLocalMode -public class ClusterServiceIT extends ESIntegTestCase { - - @Override - protected Collection> nodePlugins() { - return pluginList(TestPlugin.class); - } - - public void testTimeoutUpdateTask() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class); - final CountDownLatch block = new 
CountDownLatch(1); - clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - try { - block.await(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - throw new RuntimeException(t); - } - }); - - final CountDownLatch timedOut = new CountDownLatch(1); - final AtomicBoolean executeCalled = new AtomicBoolean(); - clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { - @Override - public TimeValue timeout() { - return TimeValue.timeValueMillis(2); - } - - @Override - public void onFailure(String source, Throwable t) { - timedOut.countDown(); - } - - @Override - public ClusterState execute(ClusterState currentState) { - executeCalled.set(true); - return currentState; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - } - }); - - timedOut.await(); - block.countDown(); - final CountDownLatch allProcessed = new CountDownLatch(1); - clusterService1.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { - @Override - public void onFailure(String source, Throwable t) { - throw new RuntimeException(t); - } - - @Override - public ClusterState execute(ClusterState currentState) { - allProcessed.countDown(); - return currentState; - } - - }); - allProcessed.await(); // executed another task to double check that execute on the timed out update task is not called... - assertThat(executeCalled.get(), equalTo(false)); - } - - public void testAckedUpdateTask() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - - final AtomicBoolean allNodesAcked = new AtomicBoolean(false); - final AtomicBoolean ackTimeout = new AtomicBoolean(false); - final AtomicBoolean onFailure = new AtomicBoolean(false); - final AtomicBoolean executed = new AtomicBoolean(false); - final CountDownLatch latch = new CountDownLatch(1); - final CountDownLatch processedLatch = new CountDownLatch(1); - clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { - @Override - protected Void newResponse(boolean acknowledged) { - return null; - } - - @Override - public boolean mustAck(DiscoveryNode discoveryNode) { - return true; - } - - @Override - public void onAllNodesAcked(@Nullable Throwable t) { - allNodesAcked.set(true); - latch.countDown(); - } - - @Override - public void onAckTimeout() { - ackTimeout.set(true); - latch.countDown(); - } - - @Override - public TimeValue ackTimeout() { - return TimeValue.timeValueSeconds(10); - } - - @Override - public TimeValue timeout() { - return TimeValue.timeValueSeconds(10); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - processedLatch.countDown(); - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - executed.set(true); - return ClusterState.builder(currentState).build(); - } - - @Override - public void onFailure(String source, Throwable t) { - logger.error("failed to execute callback in test {}", t, source); - onFailure.set(true); - latch.countDown(); - } - }); - - ensureGreen(); - assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true)); - - 
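
Condensed from the deleted acked-update tests in this class, the callback contract that AckedClusterStateUpdateTask verifies (the callback names are from the removed code; the ordering summary is inferred from the assertions around this point):

    // execute() runs first; returning a changed ClusterState is what engages acking
    // onAllNodesAcked(null) fires once every node for which mustAck(node) returned true has acked
    // onAckTimeout() fires instead when ackTimeout() elapses first (testAckedUpdateTaskTimeoutZero below
    //   shows that a zero ackTimeout() trips it immediately, even though execute() still runs)
    // clusterStateProcessed() fires once the submitting node has applied the new state
    // onFailure() preempts the above if execute() throws
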
assertThat(allNodesAcked.get(), equalTo(true)); - assertThat(ackTimeout.get(), equalTo(false)); - assertThat(executed.get(), equalTo(true)); - assertThat(onFailure.get(), equalTo(false)); - - assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true)); - } - - public void testAckedUpdateTaskSameClusterState() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - - final AtomicBoolean allNodesAcked = new AtomicBoolean(false); - final AtomicBoolean ackTimeout = new AtomicBoolean(false); - final AtomicBoolean onFailure = new AtomicBoolean(false); - final AtomicBoolean executed = new AtomicBoolean(false); - final CountDownLatch latch = new CountDownLatch(1); - final CountDownLatch processedLatch = new CountDownLatch(1); - clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { - @Override - protected Void newResponse(boolean acknowledged) { - return null; - } - - @Override - public void onAllNodesAcked(@Nullable Throwable t) { - allNodesAcked.set(true); - latch.countDown(); - } - - @Override - public void onAckTimeout() { - ackTimeout.set(true); - latch.countDown(); - } - - @Override - public TimeValue ackTimeout() { - return TimeValue.timeValueSeconds(10); - } - - @Override - public TimeValue timeout() { - return TimeValue.timeValueSeconds(10); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - processedLatch.countDown(); - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - executed.set(true); - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - logger.error("failed to execute callback in test {}", t, source); - onFailure.set(true); - latch.countDown(); - } - }); - - ensureGreen(); - assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true)); - - assertThat(allNodesAcked.get(), equalTo(true)); - assertThat(ackTimeout.get(), equalTo(false)); - assertThat(executed.get(), equalTo(true)); - assertThat(onFailure.get(), equalTo(false)); - - assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true)); - } - - public void testMasterAwareExecution() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - - InternalTestCluster.Async master = internalCluster().startNodeAsync(settings); - InternalTestCluster.Async nonMaster = internalCluster().startNodeAsync(settingsBuilder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).build()); - master.get(); - ensureGreen(); // make sure we have a cluster - - ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nonMaster.get()); - - final boolean[] taskFailed = {false}; - final CountDownLatch latch1 = new CountDownLatch(1); - clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - latch1.countDown(); - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - taskFailed[0] = true; - latch1.countDown(); - } - }); - - latch1.await(); - assertTrue("cluster state update task was executed on a non-master", taskFailed[0]); - - taskFailed[0] = true; - final CountDownLatch latch2 = new CountDownLatch(1); - clusterService.submitStateUpdateTask("test", 
new ClusterStateUpdateTask() { - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - taskFailed[0] = false; - latch2.countDown(); - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - taskFailed[0] = true; - latch2.countDown(); - } - }); - latch2.await(); - assertFalse("non-master cluster state update task was not executed", taskFailed[0]); - } - - public void testAckedUpdateTaskNoAckExpected() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - - final AtomicBoolean allNodesAcked = new AtomicBoolean(false); - final AtomicBoolean ackTimeout = new AtomicBoolean(false); - final AtomicBoolean onFailure = new AtomicBoolean(false); - final AtomicBoolean executed = new AtomicBoolean(false); - final CountDownLatch latch = new CountDownLatch(1); - clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { - @Override - protected Void newResponse(boolean acknowledged) { - return null; - } - - @Override - public boolean mustAck(DiscoveryNode discoveryNode) { - return false; - } - - @Override - public void onAllNodesAcked(@Nullable Throwable t) { - allNodesAcked.set(true); - latch.countDown(); - } - - @Override - public void onAckTimeout() { - ackTimeout.set(true); - latch.countDown(); - } - - @Override - public TimeValue ackTimeout() { - return TimeValue.timeValueSeconds(10); - } - - @Override - public TimeValue timeout() { - return TimeValue.timeValueSeconds(10); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - executed.set(true); - return ClusterState.builder(currentState).build(); - } - - @Override - public void onFailure(String source, Throwable t) { - logger.error("failed to execute callback in test {}", t, source); - onFailure.set(true); - latch.countDown(); - } - }); - - ensureGreen(); - assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true)); - - assertThat(allNodesAcked.get(), equalTo(true)); - assertThat(ackTimeout.get(), equalTo(false)); - assertThat(executed.get(), equalTo(true)); - assertThat(onFailure.get(), equalTo(false)); - } - - public void testAckedUpdateTaskTimeoutZero() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - - final AtomicBoolean allNodesAcked = new AtomicBoolean(false); - final AtomicBoolean ackTimeout = new AtomicBoolean(false); - final AtomicBoolean onFailure = new AtomicBoolean(false); - final AtomicBoolean executed = new AtomicBoolean(false); - final CountDownLatch latch = new CountDownLatch(1); - final CountDownLatch processedLatch = new CountDownLatch(1); - clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { - @Override - protected Void newResponse(boolean acknowledged) { - return null; - } - - @Override - public boolean mustAck(DiscoveryNode discoveryNode) { - return false; - } - - @Override - public void onAllNodesAcked(@Nullable Throwable t) { - allNodesAcked.set(true); - latch.countDown(); - } - - @Override - 
public void onAckTimeout() { - ackTimeout.set(true); - latch.countDown(); - } - - @Override - public TimeValue ackTimeout() { - return TimeValue.timeValueSeconds(0); - } - - @Override - public TimeValue timeout() { - return TimeValue.timeValueSeconds(10); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - processedLatch.countDown(); - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - executed.set(true); - return ClusterState.builder(currentState).build(); - } - - @Override - public void onFailure(String source, Throwable t) { - logger.error("failed to execute callback in test {}", t, source); - onFailure.set(true); - latch.countDown(); - } - }); - - ensureGreen(); - assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true)); - - assertThat(allNodesAcked.get(), equalTo(false)); - assertThat(ackTimeout.get(), equalTo(true)); - assertThat(executed.get(), equalTo(true)); - assertThat(onFailure.get(), equalTo(false)); - - assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true)); - } - - @TestLogging("_root:debug,action.admin.cluster.tasks:trace") - public void testPendingUpdateTask() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - String node_0 = internalCluster().startNode(settings); - internalCluster().startCoordinatingOnlyNode(settings); - - final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node_0); - final CountDownLatch block1 = new CountDownLatch(1); - final CountDownLatch invoked1 = new CountDownLatch(1); - clusterService.submitStateUpdateTask("1", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - invoked1.countDown(); - try { - block1.await(); - } catch (InterruptedException e) { - fail(); - } - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - invoked1.countDown(); - fail(); - } - }); - invoked1.await(); - final CountDownLatch invoked2 = new CountDownLatch(9); - for (int i = 2; i <= 10; i++) { - clusterService.submitStateUpdateTask(Integer.toString(i), new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - invoked2.countDown(); - } - }); - } - - // there might be other tasks in this node, make sure to only take the ones we add into account in this test - - // The tasks can be re-ordered, so we need to check out-of-order - Set controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10")); - List pendingClusterTasks = clusterService.pendingTasks(); - assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(10)); - assertThat(pendingClusterTasks.get(0).getSource().string(), equalTo("1")); - assertThat(pendingClusterTasks.get(0).isExecuting(), equalTo(true)); - for (PendingClusterTask task : pendingClusterTasks) { - controlSources.remove(task.getSource().string()); - } - assertTrue(controlSources.isEmpty()); - - controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10")); - PendingClusterTasksResponse response = internalCluster().coordOnlyNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet(); - 
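
The deleted testPendingUpdateTask around this point inspects the queue both via ClusterService.pendingTasks() and via the pending-tasks API; a compact sketch of the PendingClusterTask accessors it relies on (the task source "1" is the blocked task the removed test submits first, and the generic arguments, dropped by the flattened diff, are assumed):

    List<PendingClusterTask> pending = clusterService.pendingTasks();
    PendingClusterTask head = pending.get(0);
    assertEquals("1", head.getSource().string());   // the blocked task stays at the head of the queue
    assertTrue(head.isExecuting());                 // and is the one currently executing
    for (PendingClusterTask task : pending) {
        // queue time is only nonzero for tasks that waited behind the blocker
        long queuedMillis = task.getTimeInQueueInMillis();
    }
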
assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(10)); - assertThat(response.pendingTasks().get(0).getSource().string(), equalTo("1")); - assertThat(response.pendingTasks().get(0).isExecuting(), equalTo(true)); - for (PendingClusterTask task : response) { - controlSources.remove(task.getSource().string()); - } - assertTrue(controlSources.isEmpty()); - block1.countDown(); - invoked2.await(); - - // whenever we test for no tasks, we need to awaitBusy since this is a live node - assertTrue(awaitBusy(() -> clusterService.pendingTasks().isEmpty())); - waitNoPendingTasksOnAll(); - - final CountDownLatch block2 = new CountDownLatch(1); - final CountDownLatch invoked3 = new CountDownLatch(1); - clusterService.submitStateUpdateTask("1", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - invoked3.countDown(); - try { - block2.await(); - } catch (InterruptedException e) { - fail(); - } - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - invoked3.countDown(); - fail(); - } - }); - invoked3.await(); - - for (int i = 2; i <= 5; i++) { - clusterService.submitStateUpdateTask(Integer.toString(i), new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - } - Thread.sleep(100); - - pendingClusterTasks = clusterService.pendingTasks(); - assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(5)); - controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5")); - for (PendingClusterTask task : pendingClusterTasks) { - controlSources.remove(task.getSource().string()); - } - assertTrue(controlSources.isEmpty()); - - response = internalCluster().coordOnlyNodeClient().admin().cluster().preparePendingClusterTasks().get(); - assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(5)); - controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5")); - for (PendingClusterTask task : response) { - if (controlSources.remove(task.getSource().string())) { - assertThat(task.getTimeInQueueInMillis(), greaterThan(0L)); - } - } - assertTrue(controlSources.isEmpty()); - block2.countDown(); - } - - public void testLocalNodeMasterListenerCallbacks() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "zen") - .put("discovery.zen.minimum_master_nodes", 1) - .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms") - .put("discovery.initial_state_timeout", "500ms") - .build(); - - String node_0 = internalCluster().startNode(settings); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - MasterAwareService testService = internalCluster().getInstance(MasterAwareService.class); - - ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("1").get(); - assertThat(clusterHealth.isTimedOut(), equalTo(false)); - - // the first node should be a master as the minimum required is 1 - assertThat(clusterService.state().nodes().masterNode(), notNullValue()); - assertThat(clusterService.state().nodes().localNodeMaster(), is(true)); - assertThat(testService.master(), is(true)); - - String node_1 = internalCluster().startNode(settings); - final ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class, node_1); - MasterAwareService testService1 = 
internalCluster().getInstance(MasterAwareService.class, node_1); - - clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get(); - assertThat(clusterHealth.isTimedOut(), equalTo(false)); - - // the second node should not be the master as node1 is already the master. - assertThat(clusterService1.state().nodes().localNodeMaster(), is(false)); - assertThat(testService1.master(), is(false)); - - internalCluster().stopCurrentMasterNode(); - clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("1").get(); - assertThat(clusterHealth.isTimedOut(), equalTo(false)); - - // now that node0 is closed, node1 should be elected as master - assertThat(clusterService1.state().nodes().localNodeMaster(), is(true)); - assertThat(testService1.master(), is(true)); - - // start another node and set min_master_node - internalCluster().startNode(Settings.builder().put(settings)); - assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); - - Settings transientSettings = settingsBuilder() - .put("discovery.zen.minimum_master_nodes", 2) - .build(); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(transientSettings).get(); - - // and shutdown the second node - internalCluster().stopRandomNonMasterNode(); - - // there should not be any master as the minimum number of required eligible masters is not met - awaitBusy(() -> clusterService1.state().nodes().masterNode() == null && clusterService1.state().status() == ClusterState.ClusterStateStatus.APPLIED); - assertThat(testService1.master(), is(false)); - - // bring the node back up - String node_2 = internalCluster().startNode(Settings.builder().put(settings).put(transientSettings)); - ClusterService clusterService2 = internalCluster().getInstance(ClusterService.class, node_2); - MasterAwareService testService2 = internalCluster().getInstance(MasterAwareService.class, node_2); - - // make sure both nodes see each other otherwise the masternode below could be null if node 2 is master and node 1 did'r receive the updated cluster state... - assertThat(internalCluster().client(node_1).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).setWaitForNodes("2").get().isTimedOut(), is(false)); - assertThat(internalCluster().client(node_2).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).setWaitForNodes("2").get().isTimedOut(), is(false)); - - // now that we started node1 again, a new master should be elected - assertThat(clusterService2.state().nodes().masterNode(), is(notNullValue())); - if (node_2.equals(clusterService2.state().nodes().masterNode().name())) { - assertThat(testService1.master(), is(false)); - assertThat(testService2.master(), is(true)); - } else { - assertThat(testService1.master(), is(true)); - assertThat(testService2.master(), is(false)); - } - } - - /** - * Note, this test can only work as long as we have a single thread executor executing the state update tasks! 
- */ - public void testPrioritizedTasks() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - BlockingTask block = new BlockingTask(Priority.IMMEDIATE); - clusterService.submitStateUpdateTask("test", block); - int taskCount = randomIntBetween(5, 20); - Priority[] priorities = Priority.values(); - - // will hold all the tasks in the order in which they were executed - List tasks = new ArrayList<>(taskCount); - CountDownLatch latch = new CountDownLatch(taskCount); - for (int i = 0; i < taskCount; i++) { - Priority priority = priorities[randomIntBetween(0, priorities.length - 1)]; - clusterService.submitStateUpdateTask("test", new PrioritizedTask(priority, latch, tasks)); - } - - block.release(); - latch.await(); - - Priority prevPriority = null; - for (PrioritizedTask task : tasks) { - if (prevPriority == null) { - prevPriority = task.priority(); - } else { - assertThat(task.priority().sameOrAfter(prevPriority), is(true)); - } - } - } - - /* - * test that a listener throwing an exception while handling a - * notification does not prevent publication notification to the - * executor - */ - public void testClusterStateTaskListenerThrowingExceptionIsOkay() throws InterruptedException { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - - final CountDownLatch latch = new CountDownLatch(1); - AtomicBoolean published = new AtomicBoolean(); - - clusterService.submitStateUpdateTask( - "testClusterStateTaskListenerThrowingExceptionIsOkay", - new Object(), - ClusterStateTaskConfig.build(Priority.NORMAL), - new ClusterStateTaskExecutor() { - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - ClusterState newClusterState = ClusterState.builder(currentState).build(); - return BatchResult.builder().successes(tasks).build(newClusterState); - } - - @Override - public void clusterStatePublished(ClusterState newClusterState) { - published.set(true); - latch.countDown(); - } - }, - new ClusterStateTaskListener() { - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - throw new IllegalStateException(source); - } - - @Override - public void onFailure(String source, Throwable t) { - } - } - ); - - latch.await(); - assertTrue(published.get()); - } - - // test that for a single thread, tasks are executed in the order - // that they are submitted - public void testClusterStateUpdateTasksAreExecutedInOrder() throws BrokenBarrierException, InterruptedException { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - - class TaskExecutor implements ClusterStateTaskExecutor { - List tasks = new ArrayList<>(); - - @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - this.tasks.addAll(tasks); - return BatchResult.builder().successes(tasks).build(ClusterState.builder(currentState).build()); - } - - @Override - public boolean runOnlyOnMaster() { - return false; - } - } - - int numberOfThreads = 
randomIntBetween(2, 8); - TaskExecutor[] executors = new TaskExecutor[numberOfThreads]; - for (int i = 0; i < numberOfThreads; i++) { - executors[i] = new TaskExecutor(); - } - - int tasksSubmittedPerThread = randomIntBetween(2, 1024); - - CopyOnWriteArrayList> failures = new CopyOnWriteArrayList<>(); - CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); - - ClusterStateTaskListener listener = new ClusterStateTaskListener() { - @Override - public void onFailure(String source, Throwable t) { - logger.error("unexpected failure: [{}]", t, source); - failures.add(new Tuple<>(source, t)); - updateLatch.countDown(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - updateLatch.countDown(); - } - }; - - CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); - - for (int i = 0; i < numberOfThreads; i++) { - final int index = i; - Thread thread = new Thread(() -> { - try { - barrier.await(); - for (int j = 0; j < tasksSubmittedPerThread; j++) { - clusterService.submitStateUpdateTask("[" + index + "][" + j + "]", j, ClusterStateTaskConfig.build(randomFrom(Priority.values())), executors[index], listener); - } - barrier.await(); - } catch (InterruptedException | BrokenBarrierException e) { - throw new AssertionError(e); - } - }); - thread.start(); - } - - // wait for all threads to be ready - barrier.await(); - // wait for all threads to finish - barrier.await(); - - updateLatch.await(); - - assertThat(failures, empty()); - - for (int i = 0; i < numberOfThreads; i++) { - assertEquals(tasksSubmittedPerThread, executors[i].tasks.size()); - for (int j = 0; j < tasksSubmittedPerThread; j++) { - assertNotNull(executors[i].tasks.get(j)); - assertEquals("cluster state update task executed out of order", j, (int)executors[i].tasks.get(j)); - } - } - } - - public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - - AtomicInteger counter = new AtomicInteger(); - class Task { - private AtomicBoolean state = new AtomicBoolean(); - - public void execute() { - if (!state.compareAndSet(false, true)) { - throw new IllegalStateException(); - } else { - counter.incrementAndGet(); - } - } - } - - int numberOfThreads = randomIntBetween(2, 8); - int tasksSubmittedPerThread = randomIntBetween(1, 1024); - int numberOfExecutors = Math.max(1, numberOfThreads / 4); - final Semaphore semaphore = new Semaphore(numberOfExecutors); - - class TaskExecutor implements ClusterStateTaskExecutor { - private AtomicInteger counter = new AtomicInteger(); - private AtomicInteger batches = new AtomicInteger(); - private AtomicInteger published = new AtomicInteger(); - - @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - tasks.forEach(task -> task.execute()); - counter.addAndGet(tasks.size()); - ClusterState maybeUpdatedClusterState = currentState; - if (randomBoolean()) { - maybeUpdatedClusterState = ClusterState.builder(currentState).build(); - batches.incrementAndGet(); - semaphore.acquire(); - } - return BatchResult.builder().successes(tasks).build(maybeUpdatedClusterState); - } - - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public void clusterStatePublished(ClusterState 
newClusterState) { - published.incrementAndGet(); - semaphore.release(); - } - } - - ConcurrentMap counters = new ConcurrentHashMap<>(); - CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); - ClusterStateTaskListener listener = new ClusterStateTaskListener() { - @Override - public void onFailure(String source, Throwable t) { - assert false; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - counters.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet(); - updateLatch.countDown(); - } - }; - - List executors = new ArrayList<>(); - for (int i = 0; i < numberOfExecutors; i++) { - executors.add(new TaskExecutor()); - } - - // randomly assign tasks to executors - List assignments = new ArrayList<>(); - for (int i = 0; i < numberOfThreads; i++) { - for (int j = 0; j < tasksSubmittedPerThread; j++) { - assignments.add(randomFrom(executors)); - } - } - - Map counts = new HashMap<>(); - for (TaskExecutor executor : assignments) { - counts.merge(executor, 1, (previous, one) -> previous + one); - } - - CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); - for (int i = 0; i < numberOfThreads; i++) { - final int index = i; - Thread thread = new Thread(() -> { - try { - barrier.await(); - for (int j = 0; j < tasksSubmittedPerThread; j++) { - ClusterStateTaskExecutor executor = assignments.get(index * tasksSubmittedPerThread + j); - clusterService.submitStateUpdateTask( - Thread.currentThread().getName(), - new Task(), - ClusterStateTaskConfig.build(randomFrom(Priority.values())), - executor, - listener); - } - barrier.await(); - } catch (BrokenBarrierException | InterruptedException e) { - throw new AssertionError(e); - } - }); - thread.start(); - } - - // wait for all threads to be ready - barrier.await(); - // wait for all threads to finish - barrier.await(); - - // wait until all the cluster state updates have been processed - updateLatch.await(); - // and until all of the publication callbacks have completed - semaphore.acquire(numberOfExecutors); - - // assert the number of executed tasks is correct - assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get()); - - // assert each executor executed the correct number of tasks - for (TaskExecutor executor : executors) { - if (counts.containsKey(executor)) { - assertEquals((int) counts.get(executor), executor.counter.get()); - assertEquals(executor.batches.get(), executor.published.get()); - } - } - - // assert the correct number of clusterStateProcessed events were triggered - for (Map.Entry entry : counters.entrySet()) { - assertEquals(entry.getValue().get(), tasksSubmittedPerThread); - } - } - - @TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level - public void testClusterStateUpdateLogging() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class); - MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test1", "cluster.service", Level.DEBUG, "*processing [test1]: took * no change in cluster_state")); - mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.TRACE, "*failed to execute cluster state update in *")); - mockAppender.addExpectation(new 
MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG, "*processing [test3]: took * done applying updated cluster_state (version: *, uuid: *)")); - - Logger rootLogger = Logger.getRootLogger(); - rootLogger.addAppender(mockAppender); - try { - final CountDownLatch latch = new CountDownLatch(4); - clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - return currentState; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - fail(); - } - - @Override - public void onFailure(String source, Throwable t) { - latch.countDown(); - } - }); - clusterService1.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - return ClusterState.builder(currentState).incrementVersion().build(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - // Additional update task to make sure all previous logging made it to the logger - // We don't check logging for this on since there is no guarantee that it will occur before our check - clusterService1.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - return currentState; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true)); - } finally { - rootLogger.removeAppender(mockAppender); - } - mockAppender.assertAllExpectationsMatched(); - } - - @TestLogging("cluster:WARN") // To ensure that we log cluster state events on WARN level - public void testLongClusterStateUpdateLogging() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .put(InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey(), "10s") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class); - MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation("test1 shouldn't see because setting is too low", "cluster.service", Level.WARN, "*cluster state update task [test1] took * above the warn threshold of *")); - mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.WARN, "*cluster state update task [test2] took * above the warn threshold of 10ms")); - mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.WARN, "*cluster state update task [test3] took * above 
the warn threshold of 10ms")); - mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN, "*cluster state update task [test4] took * above the warn threshold of 10ms")); - - Logger rootLogger = Logger.getRootLogger(); - rootLogger.addAppender(mockAppender); - try { - final CountDownLatch latch = new CountDownLatch(5); - final CountDownLatch processedFirstTask = new CountDownLatch(1); - clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - Thread.sleep(100); - return currentState; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - processedFirstTask.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - - processedFirstTask.await(1, TimeUnit.SECONDS); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() - .put(InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey(), "10ms"))); - - clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - Thread.sleep(100); - throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - fail(); - } - - @Override - public void onFailure(String source, Throwable t) { - latch.countDown(); - } - }); - clusterService1.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - Thread.sleep(100); - return ClusterState.builder(currentState).incrementVersion().build(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - clusterService1.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - Thread.sleep(100); - return currentState; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - // Additional update task to make sure all previous logging made it to the logger - // We don't check logging for this on since there is no guarantee that it will occur before our check - clusterService1.submitStateUpdateTask("test5", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - return currentState; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - assertThat(latch.await(5, TimeUnit.SECONDS), equalTo(true)); - } finally { - rootLogger.removeAppender(mockAppender); - } - mockAppender.assertAllExpectationsMatched(); - } - - private static class BlockingTask extends ClusterStateUpdateTask { - private final CountDownLatch latch = new CountDownLatch(1); - - public 
BlockingTask(Priority priority) { - super(priority); - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - latch.await(); - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - } - - public void release() { - latch.countDown(); - } - - } - - private static class PrioritizedTask extends ClusterStateUpdateTask { - - private final CountDownLatch latch; - private final List tasks; - - private PrioritizedTask(Priority priority, CountDownLatch latch, List tasks) { - super(priority); - this.latch = latch; - this.tasks = tasks; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - tasks.add(this); - latch.countDown(); - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - latch.countDown(); - } - } - - public static class TestPlugin extends Plugin { - - @Override - public String name() { - return "test plugin"; - } - - @Override - public String description() { - return "test plugin"; - } - - @Override - public Collection> nodeServices() { - List> services = new ArrayList<>(1); - services.add(MasterAwareService.class); - return services; - } - } - - @Singleton - public static class MasterAwareService extends AbstractLifecycleComponent implements LocalNodeMasterListener { - - private final ClusterService clusterService; - private volatile boolean master; - - @Inject - public MasterAwareService(Settings settings, ClusterService clusterService) { - super(settings); - clusterService.add(this); - this.clusterService = clusterService; - logger.info("initialized test service"); - } - - @Override - public void onMaster() { - logger.info("on master [" + clusterService.localNode() + "]"); - master = true; - } - - @Override - public void offMaster() { - logger.info("off master [" + clusterService.localNode() + "]"); - master = false; - } - - public boolean master() { - return master; - } - - @Override - protected void doStart() { - } - - @Override - protected void doStop() { - } - - @Override - protected void doClose() { - } - - @Override - public String executorName() { - return ThreadPool.Names.SAME; - } - - } -} diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index addb753ff4a..c158fb1c360 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -639,6 +639,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase { return new SnapshotsInProgress(new SnapshotsInProgress.Entry( new SnapshotId(randomName("repo"), randomName("snap")), randomBoolean(), + randomBoolean(), SnapshotsInProgress.State.fromValue((byte) randomIntBetween(0, 6)), Collections.emptyList(), Math.abs(randomLong()), diff --git a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index a74102f6969..424565f13bf 100644 --- a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -92,22 +92,22 @@ public class DiskUsageTests extends ESTestCase { } public void testFillShardLevelInfo() { - final Index index = new Index("test", "_na_"); + final Index index = new Index("test", "0xdeadbeef"); ShardRouting test_0 = ShardRouting.newUnassigned(index, 0, null, false, new 
UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); ShardRoutingHelper.initialize(test_0, "node1"); ShardRoutingHelper.moveToStarted(test_0); - Path test0Path = createTempDir().resolve("indices").resolve("test").resolve("0"); + Path test0Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("0"); CommonStats commonStats0 = new CommonStats(); commonStats0.store = new StoreStats(100, 1); ShardRouting test_1 = ShardRouting.newUnassigned(index, 1, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); ShardRoutingHelper.initialize(test_1, "node2"); ShardRoutingHelper.moveToStarted(test_1); - Path test1Path = createTempDir().resolve("indices").resolve("test").resolve("1"); + Path test1Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("1"); CommonStats commonStats1 = new CommonStats(); commonStats1.store = new StoreStats(1000, 1); ShardStats[] stats = new ShardStats[] { - new ShardStats(test_0, new ShardPath(false, test0Path, test0Path, "0xdeadbeef", test_0.shardId()), commonStats0 , null), - new ShardStats(test_1, new ShardPath(false, test1Path, test1Path, "0xdeadbeef", test_1.shardId()), commonStats1 , null) + new ShardStats(test_0, new ShardPath(false, test0Path, test0Path, test_0.shardId()), commonStats0 , null), + new ShardStats(test_1, new ShardPath(false, test1Path, test1Path, test_1.shardId()), commonStats1 , null) }; ImmutableOpenMap.Builder shardSizes = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder routingToPath = ImmutableOpenMap.builder(); @@ -143,11 +143,11 @@ public class DiskUsageTests extends ESTestCase { }; NodeStats[] nodeStats = new NodeStats[] { new NodeStats(new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, - null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null), + null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null, null), new NodeStats(new DiscoveryNode("node_2", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, - null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null), + null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null, null), new NodeStats(new DiscoveryNode("node_3", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, - null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null) + null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null, null) }; InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvaiableUsages, newMostAvaiableUsages); DiskUsage leastNode_1 = newLeastAvaiableUsages.get("node_1"); @@ -184,11 +184,11 @@ public class DiskUsageTests extends ESTestCase { }; NodeStats[] nodeStats = new NodeStats[] { new NodeStats(new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, - null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null), + null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null, null), new NodeStats(new DiscoveryNode("node_2", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, - null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null), + null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null, null), new NodeStats(new DiscoveryNode("node_3", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, - null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null) + null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null, 
null) }; InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvailableUsages, newMostAvailableUsages); DiskUsage leastNode_1 = newLeastAvailableUsages.get("node_1"); diff --git a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 78128fe30f2..9c004a95f3b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -22,6 +22,7 @@ package org.elasticsearch.cluster; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java index 370f1464fd2..13b1d40b5d3 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -235,7 +235,7 @@ public class NoMasterNodeIT extends ESIntegTestCase { ensureSearchable("test1", "test2"); ClusterStateResponse clusterState = client().admin().cluster().prepareState().get(); - logger.info("Cluster state:\n" + clusterState.getState().prettyPrint()); + logger.info("Cluster state:\n{}", clusterState.getState().prettyPrint()); internalCluster().stopRandomDataNode(); assertTrue(awaitBusy(() -> { diff --git a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java new file mode 100644 index 00000000000..84c9e9f07a0 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -0,0 +1,275 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.component.LifecycleListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.TransportServiceAdapter; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.equalTo; + +public class NodeConnectionsServiceTests extends ESTestCase { + + private static ThreadPool THREAD_POOL; + private MockTransport transport; + private TransportService transportService; + + private List generateNodes() { + List nodes = new ArrayList<>(); + for (int i = randomIntBetween(20, 50); i > 0; i--) { + final HashMap attributes = new HashMap<>(); + if (rarely()) { + attributes.put("client", "true"); + } else { + attributes.put("master", "" + randomBoolean()); + attributes.put("data", "" + randomBoolean()); + attributes.put("ingest", "" + randomBoolean()); + } + nodes.add(new DiscoveryNode("node_" + i, "" + i, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT)); + } + return nodes; + } + + private ClusterState clusterStateFromNodes(List nodes) { + final DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (DiscoveryNode node : nodes) { + builder.put(node); + } + return ClusterState.builder(new ClusterName("test")).nodes(builder).build(); + } + + public void testConnectAndDisconnect() { + List nodes = generateNodes(); + NodeConnectionsService service = new NodeConnectionsService(Settings.EMPTY, THREAD_POOL, transportService); + + ClusterState current = clusterStateFromNodes(Collections.emptyList()); + ClusterChangedEvent event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current); + + service.connectToAddedNodes(event); + assertConnected(event.nodesDelta().addedNodes()); + + service.disconnectFromRemovedNodes(event); + assertConnectedExactlyToNodes(event.state()); + + current = event.state(); + event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current); + + service.connectToAddedNodes(event); + assertConnected(event.nodesDelta().addedNodes()); + + service.disconnectFromRemovedNodes(event); + assertConnectedExactlyToNodes(event.state()); + } + + + public void testReconnect() { + List nodes = generateNodes(); + NodeConnectionsService service = new NodeConnectionsService(Settings.EMPTY, THREAD_POOL, transportService); + + ClusterState current = 
clusterStateFromNodes(Collections.emptyList()); + ClusterChangedEvent event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current); + + transport.randomConnectionExceptions = true; + + service.connectToAddedNodes(event); + + for (int i = 0; i < 3; i++) { + // simulate disconnects + for (DiscoveryNode node : randomSubsetOf(nodes)) { + transport.disconnectFromNode(node); + } + service.new ConnectionChecker().run(); + } + + // disable exceptions so things can be restored + transport.randomConnectionExceptions = false; + service.new ConnectionChecker().run(); + assertConnectedExactlyToNodes(event.state()); + } + + private void assertConnectedExactlyToNodes(ClusterState state) { + assertConnected(state.nodes()); + assertThat(transport.connectedNodes.size(), equalTo(state.nodes().size())); + } + + private void assertConnected(Iterable nodes) { + for (DiscoveryNode node : nodes) { + assertTrue("not connected to " + node, transport.connectedNodes.contains(node)); + } + } + + private void assertNotConnected(Iterable nodes) { + for (DiscoveryNode node : nodes) { + assertFalse("still connected to " + node, transport.connectedNodes.contains(node)); + } + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + this.transport = new MockTransport(); + transportService = new TransportService(transport, THREAD_POOL); + transportService.start(); + transportService.acceptIncomingRequests(); + } + + @Override + @After + public void tearDown() throws Exception { + transportService.stop(); + super.tearDown(); + } + + @AfterClass + public static void stopThreadPool() { + ThreadPool.terminate(THREAD_POOL, 30, TimeUnit.SECONDS); + THREAD_POOL = null; + } + + + final class MockTransport implements Transport { + + Set connectedNodes = ConcurrentCollections.newConcurrentSet(); + volatile boolean randomConnectionExceptions = false; + + @Override + public void transportServiceAdapter(TransportServiceAdapter service) { + + } + + @Override + public BoundTransportAddress boundAddress() { + return null; + } + + @Override + public Map profileBoundAddresses() { + return null; + } + + @Override + public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception { + return new TransportAddress[0]; + } + + @Override + public boolean addressSupported(Class address) { + return false; + } + + @Override + public boolean nodeConnected(DiscoveryNode node) { + return connectedNodes.contains(node); + } + + @Override + public void connectToNode(DiscoveryNode node) throws ConnectTransportException { + if (connectedNodes.contains(node) == false && randomConnectionExceptions && randomBoolean()) { + throw new ConnectTransportException(node, "simulated"); + } + connectedNodes.add(node); + } + + @Override + public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException { + + } + + @Override + public void disconnectFromNode(DiscoveryNode node) { + connectedNodes.remove(node); + } + + @Override + public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, + TransportRequestOptions options) throws IOException, TransportException { + + } + + @Override + public long serverOpen() { + return 0; + } + + @Override + public List getLocalAddresses() { + return null; + } + + @Override + public Lifecycle.State lifecycleState() { + return null; + } + + @Override + public void addLifecycleListener(LifecycleListener listener) { + + } + + @Override + public void 
removeLifecycleListener(LifecycleListener listener) { + + } + + @Override + public Transport start() { + return null; + } + + @Override + public Transport stop() { + return null; + } + + @Override + public void close() { + + } + } +} \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java index 6339c700eec..29ce8e7a636 100644 --- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -40,7 +41,6 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.cluster.service.InternalClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.test.ESAllocationTestCase; @@ -305,7 +305,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa return randomSubsetOf(1, shards.toArray(new ShardRouting[0])).get(0); } else { return - TestShardRouting.newShardRouting(shardRouting.index(), shardRouting.id(), InternalClusterService.generateNodeId(Settings.EMPTY), randomBoolean(), randomFrom(ShardRoutingState.values())); + TestShardRouting.newShardRouting(shardRouting.index(), shardRouting.id(), DiscoveryNodeService.generateNodeId(Settings.EMPTY), randomBoolean(), randomFrom(ShardRoutingState.values())); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index c4031edc2d6..be8984830e4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.action.shard; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.NotMasterException; @@ -34,10 +33,10 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; 
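[Editor's note] The NodeConnectionsServiceTests file added above drives reconnect handling through a hand-rolled Transport stub: connectToNode can be told to fail brand-new connections at random, and testReconnect then runs the service's ConnectionChecker repeatedly until the connected set converges. A minimal, self-contained sketch of that testing pattern follows; the class and method names here are illustrative, not Elasticsearch API.

    import java.util.HashSet;
    import java.util.Random;
    import java.util.Set;

    public class FlakyTransportSketch {
        private final Set<String> connected = new HashSet<>();
        private final Random random = new Random();
        private volatile boolean randomConnectionExceptions = false;

        // Mirrors the stub's behaviour: only a brand-new connection may fail.
        void connectToNode(String node) {
            if (!connected.contains(node) && randomConnectionExceptions && random.nextBoolean()) {
                throw new RuntimeException("simulated failure connecting to " + node);
            }
            connected.add(node);
        }

        // One sweep of the checker: try every wanted node, swallow failures,
        // and rely on the next sweep to retry whatever did not connect.
        void checkerPass(Set<String> wanted) {
            for (String node : wanted) {
                try {
                    connectToNode(node);
                } catch (RuntimeException e) {
                    // ignored: retried on the next pass
                }
            }
        }

        public static void main(String[] args) {
            FlakyTransportSketch transport = new FlakyTransportSketch();
            Set<String> wanted = Set.of("node_1", "node_2", "node_3");

            // A few passes with failures enabled, like testReconnect()'s loop ...
            transport.randomConnectionExceptions = true;
            for (int i = 0; i < 3; i++) {
                transport.checkerPass(wanted);
            }

            // ... then disable failures so one final pass restores everything,
            // matching the test's assertConnectedExactlyToNodes check.
            transport.randomConnectionExceptions = false;
            transport.checkerPass(wanted);
            System.out.println("connected exactly to: " + transport.connected);
        }
    }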
import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.NodeDisconnectedException; @@ -57,6 +56,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongConsumer; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -67,7 +68,7 @@ public class ShardStateActionTests extends ESTestCase { private TestShardStateAction shardStateAction; private CapturingTransport transport; private TransportService transportService; - private TestClusterService clusterService; + private ClusterService clusterService; private static class TestShardStateAction extends ShardStateAction { public TestShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService, AllocationService allocationService, RoutingService routingService) { @@ -104,19 +105,22 @@ public class ShardStateActionTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); this.transport = new CapturingTransport(); - clusterService = new TestClusterService(THREAD_POOL); + clusterService = createClusterService(THREAD_POOL); transportService = new TransportService(transport, THREAD_POOL); transportService.start(); transportService.acceptIncomingRequests(); shardStateAction = new TestShardStateAction(Settings.EMPTY, clusterService, transportService, null, null); - shardStateAction.setOnBeforeWaitForNewMasterAndRetry(() -> {}); - shardStateAction.setOnAfterWaitForNewMasterAndRetry(() -> {}); + shardStateAction.setOnBeforeWaitForNewMasterAndRetry(() -> { + }); + shardStateAction.setOnAfterWaitForNewMasterAndRetry(() -> { + }); } @Override @After public void tearDown() throws Exception { - transportService.stop(); + clusterService.close(); + transportService.close(); super.tearDown(); } @@ -129,7 +133,7 @@ public class ShardStateActionTests extends ESTestCase { public void testSuccess() throws InterruptedException { final String index = "test"; - clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); + setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); AtomicBoolean success = new AtomicBoolean(); CountDownLatch latch = new CountDownLatch(1); @@ -154,7 +158,7 @@ public class ShardStateActionTests extends ESTestCase { assertEquals(1, capturedRequests.length); // the request is a shard failed request assertThat(capturedRequests[0].request, is(instanceOf(ShardStateAction.ShardRoutingEntry.class))); - ShardStateAction.ShardRoutingEntry shardRoutingEntry = (ShardStateAction.ShardRoutingEntry)capturedRequests[0].request; + ShardStateAction.ShardRoutingEntry shardRoutingEntry = (ShardStateAction.ShardRoutingEntry) capturedRequests[0].request; // for the right shard assertEquals(shardRouting, shardRoutingEntry.getShardRouting()); // sent to the master @@ -169,17 +173,18 @@ public class ShardStateActionTests extends ESTestCase { public void testNoMaster() throws InterruptedException { final String index = "test"; - 
clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); + setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); DiscoveryNodes.Builder noMasterBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); noMasterBuilder.masterNodeId(null); - clusterService.setState(ClusterState.builder(clusterService.state()).nodes(noMasterBuilder)); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes(noMasterBuilder)); CountDownLatch latch = new CountDownLatch(1); AtomicInteger retries = new AtomicInteger(); AtomicBoolean success = new AtomicBoolean(); - setUpMasterRetryVerification(1, retries, latch, requestId -> {}); + setUpMasterRetryVerification(1, retries, latch, requestId -> { + }); ShardRouting failedShard = getRandomShardRouting(index); shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { @@ -206,7 +211,7 @@ public class ShardStateActionTests extends ESTestCase { public void testMasterChannelException() throws InterruptedException { final String index = "test"; - clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); + setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); CountDownLatch latch = new CountDownLatch(1); AtomicInteger retries = new AtomicInteger(); @@ -216,8 +221,8 @@ public class ShardStateActionTests extends ESTestCase { LongConsumer retryLoop = requestId -> { if (randomBoolean()) { transport.handleRemoteError( - requestId, - randomFrom(new NotMasterException("simulated"), new Discovery.FailedToCommitClusterStateException("simulated"))); + requestId, + randomFrom(new NotMasterException("simulated"), new Discovery.FailedToCommitClusterStateException("simulated"))); } else { if (randomBoolean()) { transport.handleLocalError(requestId, new NodeNotConnectedException(null, "simulated")); @@ -262,7 +267,7 @@ public class ShardStateActionTests extends ESTestCase { public void testUnhandledFailure() { final String index = "test"; - clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); + setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); AtomicBoolean failure = new AtomicBoolean(); @@ -291,14 +296,14 @@ public class ShardStateActionTests extends ESTestCase { public void testShardNotFound() throws InterruptedException { final String index = "test"; - clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); + setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); AtomicBoolean success = new AtomicBoolean(); CountDownLatch latch = new CountDownLatch(1); ShardRouting failedShard = getRandomShardRouting(index); RoutingTable routingTable = RoutingTable.builder(clusterService.state().getRoutingTable()).remove(index).build(); - clusterService.setState(ClusterState.builder(clusterService.state()).routingTable(routingTable)); + setState(clusterService, ClusterState.builder(clusterService.state()).routingTable(routingTable)); shardStateAction.shardFailed(failedShard, failedShard, "test", getSimulatedFailure(), new ShardStateAction.Listener() { @Override public void onSuccess() { @@ -324,7 +329,7 @@ public class ShardStateActionTests extends ESTestCase { public void testNoLongerPrimaryShardException() throws InterruptedException { 
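[Editor's note] The ShardStateActionTests hunks in this stretch all follow one migration: the TestClusterService double is replaced by a real ClusterService built via ClusterServiceUtils.createClusterService, state changes go through the static setState helper instead of a setter on the service, and tearDown now closes the service explicitly. A skeleton of that lifecycle, under stated assumptions: only the ClusterServiceUtils and ClusterStateCreationUtils calls are taken from the diff; the ThreadPool constructor and test names are illustrative.

    import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
    import org.elasticsearch.cluster.service.ClusterService;
    import org.elasticsearch.test.ESTestCase;
    import org.elasticsearch.threadpool.ThreadPool;
    import org.junit.After;
    import org.junit.AfterClass;
    import org.junit.Before;
    import org.junit.BeforeClass;

    import java.util.concurrent.TimeUnit;

    import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService;
    import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState;

    public class ClusterServiceLifecycleSketchTests extends ESTestCase {

        private static ThreadPool THREAD_POOL;
        private ClusterService clusterService;

        @BeforeClass
        public static void startThreadPool() {
            THREAD_POOL = new ThreadPool("cluster-service-lifecycle-sketch"); // ctor assumed per the era's test idiom
        }

        @Override
        @Before
        public void setUp() throws Exception {
            super.setUp();
            clusterService = createClusterService(THREAD_POOL); // a real service, not a test double
        }

        @Override
        @After
        public void tearDown() throws Exception {
            clusterService.close(); // required now: the real service owns resources
            super.tearDown();
        }

        @AfterClass
        public static void stopThreadPool() {
            ThreadPool.terminate(THREAD_POOL, 30, TimeUnit.SECONDS);
            THREAD_POOL = null;
        }

        public void testStateCanBeSwapped() {
            // State changes go through the utility instead of clusterService.setState(...).
            setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary("test", true, 0));
            assertNotNull(clusterService.state().metaData().index("test"));
        }
    }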
final String index = "test"; - clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); + setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); ShardRouting failedShard = getRandomShardRouting(index); @@ -349,7 +354,7 @@ public class ShardStateActionTests extends ESTestCase { }); ShardStateAction.NoLongerPrimaryShardException catastrophicError = - new ShardStateAction.NoLongerPrimaryShardException(failedShard.shardId(), "source shard [" + sourceFailedShard + " is neither the local allocation nor the primary allocation"); + new ShardStateAction.NoLongerPrimaryShardException(failedShard.shardId(), "source shard [" + sourceFailedShard + " is neither the local allocation nor the primary allocation"); CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); transport.handleRemoteError(capturedRequests[0].requestId, catastrophicError); @@ -371,7 +376,7 @@ public class ShardStateActionTests extends ESTestCase { shardStateAction.setOnBeforeWaitForNewMasterAndRetry(() -> { DiscoveryNodes.Builder masterBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); masterBuilder.masterNodeId(clusterService.state().nodes().masterNodes().iterator().next().value.id()); - clusterService.setState(ClusterState.builder(clusterService.state()).nodes(masterBuilder)); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes(masterBuilder)); }); shardStateAction.setOnAfterWaitForNewMasterAndRetry(() -> verifyRetry(numberOfRetries, retries, latch, retryLoop)); diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index cc5ce05aca6..2016175a49c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -22,8 +22,10 @@ package org.elasticsearch.cluster.allocation; import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; @@ -42,6 +44,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -226,9 +229,10 @@ public class ClusterRerouteIT extends ESIntegTestCase { assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED)); client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).execute().actionGet(); + final Index index = resolveIndex("test"); logger.info("--> closing all nodes"); - Path[] shardLocation = internalCluster().getInstance(NodeEnvironment.class, 
node_1).availableShardPaths(new ShardId("test", "_na_", 0)); + Path[] shardLocation = internalCluster().getInstance(NodeEnvironment.class, node_1).availableShardPaths(new ShardId(index, 0)); assertThat(FileSystemUtils.exists(shardLocation), equalTo(true)); // make sure the data is there! internalCluster().closeNonSharedNodes(false); // don't wipe data directories the index needs to be there! diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleIT.java deleted file mode 100644 index 60fa45ebfa1..00000000000 --- a/core/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleIT.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster.allocation; - -import org.elasticsearch.cluster.ClusterModule; -import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; -import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; - -import java.io.IOException; - -import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.elasticsearch.test.ESIntegTestCase.Scope; -import static org.hamcrest.Matchers.instanceOf; - -@ClusterScope(scope= Scope.TEST, numDataNodes =0) -public class ShardsAllocatorModuleIT extends ESIntegTestCase { - - public void testLoadDefaultShardsAllocator() throws IOException { - assertAllocatorInstance(Settings.Builder.EMPTY_SETTINGS, BalancedShardsAllocator.class); - } - - public void testLoadByShortKeyShardsAllocator() throws IOException { - Settings build = settingsBuilder().put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), "even_shard") // legacy just to make sure we don't barf - .build(); - assertAllocatorInstance(build, BalancedShardsAllocator.class); - build = settingsBuilder().put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), ClusterModule.BALANCED_ALLOCATOR).build(); - assertAllocatorInstance(build, BalancedShardsAllocator.class); - } - - private void assertAllocatorInstance(Settings settings, Class clazz) throws IOException { - while (cluster().size() != 0) { - internalCluster().stopRandomDataNode(); - } - internalCluster().startNode(settings); - ShardsAllocator instance = internalCluster().getInstance(ShardsAllocator.class); - assertThat(instance, instanceOf(clazz)); - } -} diff --git a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java index 3562fa313ba..9c0f1014dcf 100644 
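[Editor's note] The ClusterRerouteIT hunk above is a small instance of a wider migration in this codebase: shards are addressed through a resolved Index (name plus UUID) instead of an index name paired with the "_na_" placeholder UUID. A standalone sketch of the two forms; the UUID literal below is invented, whereas in the test it comes from the resolveIndex("test") helper.

    import org.elasticsearch.index.Index;
    import org.elasticsearch.index.shard.ShardId;

    public class ShardIdSketch {
        public static void main(String[] args) {
            // Old form: index name plus the "_na_" placeholder UUID.
            ShardId byName = new ShardId("test", "_na_", 0);

            // New form: carry a concrete Index. In the integration test the Index
            // is resolved from cluster state, so it holds the real UUID.
            Index index = new Index("test", "xyz-fake-uuid");
            ShardId byIndex = new ShardId(index, 0);

            System.out.println(byName + " vs " + byIndex);
        }
    }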
--- a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java @@ -64,7 +64,7 @@ public class ClusterStateHealthTests extends ESTestCase { routingTable.add(indexRoutingTable); } ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable.build()).build(); - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), (String[]) null); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.strictExpand(), (String[]) null); ClusterStateHealth clusterStateHealth = new ClusterStateHealth(clusterState, concreteIndices); logger.info("cluster status: {}, expected {}", clusterStateHealth.getStatus(), counter.status()); clusterStateHealth = maybeSerialize(clusterStateHealth); @@ -91,7 +91,7 @@ public class ClusterStateHealthTests extends ESTestCase { metaData.put(indexMetaData, true); routingTable.add(indexRoutingTable); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable.build()).build(); - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), (String[]) null); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.strictExpand(), (String[]) null); ClusterStateHealth clusterStateHealth = new ClusterStateHealth(clusterState, concreteIndices); clusterStateHealth = maybeSerialize(clusterStateHealth); // currently we have no cluster level validation failures as index validation issues are reported per index. diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index 1c61292d87c..0e0c9fb442b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -61,79 +61,79 @@ public class IndexNameExpressionResolverTests extends ESTestCase { IndicesOptions[] indicesOptions = new IndicesOptions[]{ IndicesOptions.strictExpandOpen(), IndicesOptions.strictExpand()}; for (IndicesOptions options : indicesOptions) { IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options); - String[] results = indexNameExpressionResolver.concreteIndices(context, "foo"); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo"); assertEquals(1, results.length); assertEquals("foo", results[0]); try { - indexNameExpressionResolver.concreteIndices(context, "bar"); + indexNameExpressionResolver.concreteIndexNames(context, "bar"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("bar")); } - results = indexNameExpressionResolver.concreteIndices(context, "foofoo", "foobar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foofoo", "foobar"); assertEquals(2, results.length); assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar")); - results = indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foofoobar"); assertEquals(new HashSet<>(Arrays.asList("foo", "foobar")), new 
HashSet<>(Arrays.asList(results))); try { - indexNameExpressionResolver.concreteIndices(context, "bar"); + indexNameExpressionResolver.concreteIndexNames(context, "bar"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("bar")); } try { - indexNameExpressionResolver.concreteIndices(context, "foo", "bar"); + indexNameExpressionResolver.concreteIndexNames(context, "foo", "bar"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("bar")); } - results = indexNameExpressionResolver.concreteIndices(context, "barbaz", "foobar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "barbaz", "foobar"); assertEquals(2, results.length); assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar")); try { - indexNameExpressionResolver.concreteIndices(context, "barbaz", "bar"); + indexNameExpressionResolver.concreteIndexNames(context, "barbaz", "bar"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("bar")); } - results = indexNameExpressionResolver.concreteIndices(context, "baz*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "baz*"); assertThat(results, emptyArray()); - results = indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*"); assertEquals(1, results.length); assertEquals("foo", results[0]); } IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen()); - String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY); assertEquals(3, results.length); - results = indexNameExpressionResolver.concreteIndices(context, (String[])null); + results = indexNameExpressionResolver.concreteIndexNames(context, (String[])null); assertEquals(3, results.length); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpand()); - results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY); assertEquals(4, results.length); - results = indexNameExpressionResolver.concreteIndices(context, (String[])null); + results = indexNameExpressionResolver.concreteIndexNames(context, (String[])null); assertEquals(4, results.length); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen()); - results = indexNameExpressionResolver.concreteIndices(context, "foofoo*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foofoo*"); assertEquals(3, results.length); assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo")); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpand()); - results = indexNameExpressionResolver.concreteIndices(context, "foofoo*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foofoo*"); assertEquals(4, results.length); assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo", "foofoo-closed")); } @@ -150,57 +150,57 @@ public class IndexNameExpressionResolverTests extends ESTestCase { IndicesOptions[] indicesOptions = new IndicesOptions[]{ IndicesOptions.lenientExpandOpen(), lenientExpand}; for (IndicesOptions options : indicesOptions) { IndexNameExpressionResolver.Context context = new 
IndexNameExpressionResolver.Context(state, options); - String[] results = indexNameExpressionResolver.concreteIndices(context, "foo"); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo"); assertEquals(1, results.length); assertEquals("foo", results[0]); - results = indexNameExpressionResolver.concreteIndices(context, "bar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "bar"); assertThat(results, emptyArray()); - results = indexNameExpressionResolver.concreteIndices(context, "foofoo", "foobar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foofoo", "foobar"); assertEquals(2, results.length); assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar")); - results = indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foofoobar"); assertEquals(2, results.length); assertEquals(new HashSet<>(Arrays.asList("foo", "foobar")), new HashSet<>(Arrays.asList(results))); - results = indexNameExpressionResolver.concreteIndices(context, "foo", "bar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "bar"); assertEquals(1, results.length); assertThat(results, arrayContainingInAnyOrder("foo")); - results = indexNameExpressionResolver.concreteIndices(context, "barbaz", "foobar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "barbaz", "foobar"); assertEquals(2, results.length); assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar")); - results = indexNameExpressionResolver.concreteIndices(context, "barbaz", "bar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "barbaz", "bar"); assertEquals(1, results.length); assertThat(results, arrayContainingInAnyOrder("foofoo")); - results = indexNameExpressionResolver.concreteIndices(context, "baz*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "baz*"); assertThat(results, emptyArray()); - results = indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*"); assertEquals(1, results.length); assertEquals("foo", results[0]); } IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY); assertEquals(3, results.length); context = new IndexNameExpressionResolver.Context(state, lenientExpand); - results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY); assertEquals(Arrays.toString(results), 4, results.length); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - results = indexNameExpressionResolver.concreteIndices(context, "foofoo*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foofoo*"); assertEquals(3, results.length); assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo")); context = new IndexNameExpressionResolver.Context(state, lenientExpand); - results = indexNameExpressionResolver.concreteIndices(context, "foofoo*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foofoo*"); assertEquals(4, results.length); assertThat(results, 
arrayContainingInAnyOrder("foo", "foobar", "foofoo", "foofoo-closed")); } @@ -219,26 +219,26 @@ public class IndexNameExpressionResolverTests extends ESTestCase { for (IndicesOptions options : indicesOptions) { IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options); - String[] results = indexNameExpressionResolver.concreteIndices(context, "foo"); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo"); assertEquals(1, results.length); assertEquals("foo", results[0]); try { - indexNameExpressionResolver.concreteIndices(context, "bar"); + indexNameExpressionResolver.concreteIndexNames(context, "bar"); fail(); } catch(IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("bar")); } try { - indexNameExpressionResolver.concreteIndices(context, "baz*"); + indexNameExpressionResolver.concreteIndexNames(context, "baz*"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("baz*")); } try { - indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("baz*")); @@ -246,11 +246,11 @@ public class IndexNameExpressionResolverTests extends ESTestCase { } IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, expandOpen); - String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY); assertEquals(3, results.length); context = new IndexNameExpressionResolver.Context(state, expand); - results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY); assertEquals(4, results.length); } @@ -264,60 +264,60 @@ public class IndexNameExpressionResolverTests extends ESTestCase { // Only closed IndicesOptions options = IndicesOptions.fromOptions(false, true, false, true); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options); - String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY); assertEquals(1, results.length); assertEquals("foo", results[0]); - results = indexNameExpressionResolver.concreteIndices(context, "foo*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foo*"); assertEquals(1, results.length); assertEquals("foo", results[0]); // no wildcards, so wildcard expansion don't apply - results = indexNameExpressionResolver.concreteIndices(context, "bar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "bar"); assertEquals(1, results.length); assertEquals("bar", results[0]); // Only open options = IndicesOptions.fromOptions(false, true, true, false); context = new IndexNameExpressionResolver.Context(state, options); - results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY); assertEquals(2, results.length); assertThat(results, arrayContainingInAnyOrder("bar", "foobar")); - results = indexNameExpressionResolver.concreteIndices(context, "foo*"); + results = 
indexNameExpressionResolver.concreteIndexNames(context, "foo*"); assertEquals(1, results.length); assertEquals("foobar", results[0]); - results = indexNameExpressionResolver.concreteIndices(context, "bar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "bar"); assertEquals(1, results.length); assertEquals("bar", results[0]); // Open and closed options = IndicesOptions.fromOptions(false, true, true, true); context = new IndexNameExpressionResolver.Context(state, options); - results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY); assertEquals(3, results.length); assertThat(results, arrayContainingInAnyOrder("bar", "foobar", "foo")); - results = indexNameExpressionResolver.concreteIndices(context, "foo*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foo*"); assertEquals(2, results.length); assertThat(results, arrayContainingInAnyOrder("foobar", "foo")); - results = indexNameExpressionResolver.concreteIndices(context, "bar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "bar"); assertEquals(1, results.length); assertEquals("bar", results[0]); - results = indexNameExpressionResolver.concreteIndices(context, "-foo*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "-foo*"); assertEquals(1, results.length); assertEquals("bar", results[0]); - results = indexNameExpressionResolver.concreteIndices(context, "-*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "-*"); assertEquals(0, results.length); options = IndicesOptions.fromOptions(false, false, true, true); context = new IndexNameExpressionResolver.Context(state, options); try { - indexNameExpressionResolver.concreteIndices(context, "-*"); + indexNameExpressionResolver.concreteIndexNames(context, "-*"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getResourceId().toString(), equalTo("[-*]")); @@ -336,21 +336,21 @@ public class IndexNameExpressionResolverTests extends ESTestCase { { IndicesOptions noExpandLenient = IndicesOptions.fromOptions(true, true, false, false); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, noExpandLenient); - String[] results = indexNameExpressionResolver.concreteIndices(context, "baz*"); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, "baz*"); assertThat(results, emptyArray()); - results = indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*"); assertEquals(1, results.length); assertEquals("foo", results[0]); - results = indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foofoobar"); assertEquals(2, results.length); assertThat(results, arrayContainingInAnyOrder("foo", "foobar")); - results = indexNameExpressionResolver.concreteIndices(context, (String[])null); + results = indexNameExpressionResolver.concreteIndexNames(context, (String[])null); assertEquals(0, results.length); - results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY); assertEquals(0, results.length); } @@ -359,17 +359,17 @@ public class IndexNameExpressionResolverTests extends ESTestCase { IndicesOptions noExpandDisallowEmpty = 
IndicesOptions.fromOptions(true, false, false, false); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, noExpandDisallowEmpty); try { - indexNameExpressionResolver.concreteIndices(context, "baz*"); + indexNameExpressionResolver.concreteIndexNames(context, "baz*"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("baz*")); } - String[] results = indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*"); assertEquals(1, results.length); assertEquals("foo", results[0]); - results = indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foofoobar"); assertEquals(2, results.length); assertThat(results, arrayContainingInAnyOrder("foo", "foobar")); } @@ -378,17 +378,17 @@ public class IndexNameExpressionResolverTests extends ESTestCase { { IndicesOptions noExpandErrorUnavailable = IndicesOptions.fromOptions(false, true, false, false); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, noExpandErrorUnavailable); - String[] results = indexNameExpressionResolver.concreteIndices(context, "baz*"); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, "baz*"); assertThat(results, emptyArray()); try { - indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("baz*")); } - results = indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foofoobar"); assertEquals(2, results.length); assertThat(results, arrayContainingInAnyOrder("foo", "foobar")); } @@ -398,20 +398,20 @@ public class IndexNameExpressionResolverTests extends ESTestCase { IndicesOptions noExpandStrict = IndicesOptions.fromOptions(false, false, false, false); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, noExpandStrict); try { - indexNameExpressionResolver.concreteIndices(context, "baz*"); + indexNameExpressionResolver.concreteIndexNames(context, "baz*"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("baz*")); } try { - indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("baz*")); } - String[] results = indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foofoobar"); assertEquals(2, results.length); assertThat(results, arrayContainingInAnyOrder("foo", "foobar")); } @@ -429,7 +429,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { try { IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); - indexNameExpressionResolver.concreteIndices(context, "baz*"); + indexNameExpressionResolver.concreteIndexNames(context, "baz*"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("baz*")); @@ -437,7 +437,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase 
{ try { IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); - indexNameExpressionResolver.concreteIndices(context, "foo", "baz*"); + indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("baz*")); @@ -445,7 +445,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { try { IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); - indexNameExpressionResolver.concreteIndices(context, "foofoobar"); + indexNameExpressionResolver.concreteIndexNames(context, "foofoobar"); fail(); } catch(IllegalArgumentException e) { assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it")); @@ -453,7 +453,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { try { IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); - indexNameExpressionResolver.concreteIndices(context, "foo", "foofoobar"); + indexNameExpressionResolver.concreteIndexNames(context, "foo", "foofoobar"); fail(); } catch(IllegalArgumentException e) { assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it")); @@ -461,7 +461,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { try { IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); - indexNameExpressionResolver.concreteIndices(context, "foofoo-closed", "foofoobar"); + indexNameExpressionResolver.concreteIndexNames(context, "foofoo-closed", "foofoobar"); fail(); } catch(IndexClosedException e) { assertThat(e.getMessage(), equalTo("closed")); @@ -469,7 +469,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { } IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); - String[] results = indexNameExpressionResolver.concreteIndices(context, "foo", "barbaz"); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "barbaz"); assertEquals(2, results.length); assertThat(results, arrayContainingInAnyOrder("foo", "foofoo")); } @@ -479,18 +479,18 @@ public class IndexNameExpressionResolverTests extends ESTestCase { IndicesOptions options = IndicesOptions.strictExpandOpen(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options); - String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY); assertThat(results, emptyArray()); try { - indexNameExpressionResolver.concreteIndices(context, "foo"); + indexNameExpressionResolver.concreteIndexNames(context, "foo"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("foo")); } - results = indexNameExpressionResolver.concreteIndices(context, "foo*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foo*"); assertThat(results, emptyArray()); try { - indexNameExpressionResolver.concreteIndices(context, "foo*", "bar"); + 
indexNameExpressionResolver.concreteIndexNames(context, "foo*", "bar"); fail(); } catch (IndexNotFoundException e) { assertThat(e.getIndex().getName(), equalTo("bar")); @@ -498,18 +498,18 @@ public class IndexNameExpressionResolverTests extends ESTestCase { context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY); assertThat(results, emptyArray()); - results = indexNameExpressionResolver.concreteIndices(context, "foo"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foo"); assertThat(results, emptyArray()); - results = indexNameExpressionResolver.concreteIndices(context, "foo*"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foo*"); assertThat(results, emptyArray()); - results = indexNameExpressionResolver.concreteIndices(context, "foo*", "bar"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foo*", "bar"); assertThat(results, emptyArray()); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, false, true, false)); try { - indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY); + indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY); } catch (IndexNotFoundException e) { assertThat(e.getResourceId().toString(), equalTo("[_all]")); } @@ -527,7 +527,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen()); try { - indexNameExpressionResolver.concreteIndices(context, "testZZZ"); + indexNameExpressionResolver.concreteIndexNames(context, "testZZZ"); fail("Expected IndexNotFoundException"); } catch(IndexNotFoundException e) { assertThat(e.getMessage(), is("no such index")); @@ -541,7 +541,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testXXX", "testZZZ")), equalTo(newHashSet("testXXX"))); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testXXX", "testZZZ")), equalTo(newHashSet("testXXX"))); } public void testConcreteIndicesIgnoreIndicesAllMissing() { @@ -552,7 +552,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen()); try { - indexNameExpressionResolver.concreteIndices(context, "testMo", "testMahdy"); + indexNameExpressionResolver.concreteIndexNames(context, "testMo", "testMahdy"); fail("Expected IndexNotFoundException"); } catch(IndexNotFoundException e) { assertThat(e.getMessage(), is("no such index")); @@ -565,7 +565,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { .put(indexBuilder("kuku")); ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - 
assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, new String[]{})), equalTo(newHashSet("kuku", "testXXX"))); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, new String[]{})), equalTo(newHashSet("kuku", "testXXX"))); } public void testConcreteIndicesWildcardExpansion() { @@ -578,13 +578,13 @@ public class IndexNameExpressionResolverTests extends ESTestCase { ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, false)); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testX*")), equalTo(new HashSet())); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(new HashSet())); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, false)); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY"))); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY"))); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, true)); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testX*")), equalTo(newHashSet("testXYY"))); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXYY"))); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, true)); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); } /** @@ -610,7 +610,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { // with no indices, asking for all indices should return empty list or exception, depending on indices options if (indicesOptions.allowNoIndices()) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(context, allIndices); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(context, allIndices); assertThat(concreteIndices, notNullValue()); assertThat(concreteIndices.length, equalTo(0)); } else { @@ -625,7 +625,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); context = new IndexNameExpressionResolver.Context(state, indicesOptions); if (indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed() || indicesOptions.allowNoIndices()) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(context, allIndices); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(context, allIndices); assertThat(concreteIndices, notNullValue()); int expectedNumberOfIndices = 0; if (indicesOptions.expandWildcardsOpen()) { @@ -646,7 +646,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { */ private void checkCorrectException(IndexNameExpressionResolver indexNameExpressionResolver, IndexNameExpressionResolver.Context context, String[] allIndices) { try { - 
indexNameExpressionResolver.concreteIndices(context, allIndices); + indexNameExpressionResolver.concreteIndexNames(context, allIndices); fail("wildcard expansion on should trigger IndexMissingException"); } catch (IndexNotFoundException e) { // expected @@ -668,12 +668,12 @@ public class IndexNameExpressionResolverTests extends ESTestCase { // asking for non existing wildcard pattern should return empty list or exception if (indicesOptions.allowNoIndices()) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(context, "Foo*"); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(context, "Foo*"); assertThat(concreteIndices, notNullValue()); assertThat(concreteIndices.length, equalTo(0)); } else { try { - indexNameExpressionResolver.concreteIndices(context, "Foo*"); + indexNameExpressionResolver.concreteIndexNames(context, "Foo*"); fail("expecting exception when result empty and allowNoIndicec=false"); } catch (IndexNotFoundException e) { // expected exception @@ -798,51 +798,51 @@ public class IndexNameExpressionResolverTests extends ESTestCase { IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpenAndForbidClosed()); try { - indexNameExpressionResolver.concreteIndices(context, "foo1-closed"); + indexNameExpressionResolver.concreteIndexNames(context, "foo1-closed"); fail("foo1-closed should be closed, but it is open"); } catch (IndexClosedException e) { // expected } try { - indexNameExpressionResolver.concreteIndices(context, "foobar1-closed"); + indexNameExpressionResolver.concreteIndexNames(context, "foobar1-closed"); fail("foo1-closed should be closed, but it is open"); } catch (IndexClosedException e) { // expected } context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, context.getOptions().allowNoIndices(), context.getOptions().expandWildcardsOpen(), context.getOptions().expandWildcardsClosed(), context.getOptions())); - String[] results = indexNameExpressionResolver.concreteIndices(context, "foo1-closed"); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo1-closed"); assertThat(results, emptyArray()); - results = indexNameExpressionResolver.concreteIndices(context, "foobar1-closed"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foobar1-closed"); assertThat(results, emptyArray()); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - results = indexNameExpressionResolver.concreteIndices(context, "foo1-closed"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foo1-closed"); assertThat(results, arrayWithSize(1)); assertThat(results, arrayContaining("foo1-closed")); - results = indexNameExpressionResolver.concreteIndices(context, "foobar1-closed"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foobar1-closed"); assertThat(results, arrayWithSize(1)); assertThat(results, arrayContaining("foo1-closed")); // testing an alias pointing to three indices: context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpenAndForbidClosed()); try { - indexNameExpressionResolver.concreteIndices(context, "foobar2-closed"); + indexNameExpressionResolver.concreteIndexNames(context, "foobar2-closed"); fail("foo2-closed should be closed, but it is open"); } catch (IndexClosedException e) { // expected } context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, 
context.getOptions().allowNoIndices(), context.getOptions().expandWildcardsOpen(), context.getOptions().expandWildcardsClosed(), context.getOptions())); - results = indexNameExpressionResolver.concreteIndices(context, "foobar2-closed"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foobar2-closed"); assertThat(results, arrayWithSize(1)); assertThat(results, arrayContaining("foo3")); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - results = indexNameExpressionResolver.concreteIndices(context, "foobar2-closed"); + results = indexNameExpressionResolver.concreteIndexNames(context, "foobar2-closed"); assertThat(results, arrayWithSize(3)); assertThat(results, arrayContainingInAnyOrder("foo1-closed", "foo2-closed", "foo3")); } @@ -855,7 +855,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase { IndicesOptions.lenientExpandOpen(), IndicesOptions.strictExpandOpenAndForbidClosed()}; for (IndicesOptions options : indicesOptions) { IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options); - String[] results = indexNameExpressionResolver.concreteIndices(context, "index1", "index1", "alias1"); + String[] results = indexNameExpressionResolver.concreteIndexNames(context, "index1", "index1", "alias1"); assertThat(results, equalTo(new String[]{"index1"})); } } @@ -875,11 +875,11 @@ public class IndexNameExpressionResolverTests extends ESTestCase { ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - String[] strings = indexNameExpressionResolver.concreteIndices(context, "alias-*"); + String[] strings = indexNameExpressionResolver.concreteIndexNames(context, "alias-*"); assertArrayEquals(new String[] {"test-0"}, strings); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen()); - strings = indexNameExpressionResolver.concreteIndices(context, "alias-*"); + strings = indexNameExpressionResolver.concreteIndexNames(context, "alias-*"); assertArrayEquals(new String[] {"test-0"}, strings); } diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java index a43da9e53fa..0d8784834fa 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java @@ -83,7 +83,7 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_CREATION_DATE, 1) .put(IndexMetaData.SETTING_INDEX_UUID, "BOOM") - .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_0_18_1_ID) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_2_0_0_beta1) .put(indexSettings) .build(); IndexMetaData metaData = IndexMetaData.builder(name).settings(build).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java index d9cf9f0d790..744477d6722 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java +++ 
b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -47,6 +47,8 @@ public class WildcardExpressionResolverTests extends ESTestCase { assertThat(newHashSet(resolver.resolve(context, Arrays.asList("test*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*"))), equalTo(newHashSet("testXXX", "testXYY"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "kuku"))), equalTo(newHashSet("testXXX", "testXYY", "kuku"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); } public void testConvertWildcardsTests() { @@ -107,6 +109,18 @@ public class WildcardExpressionResolverTests extends ESTestCase { assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*Y*X"))).size(), equalTo(0)); } + public void testAll() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("testXXX")) + .put(indexBuilder("testXYY")) + .put(indexBuilder("testYYY")); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); + + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("_all"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + } + private IndexMetaData.Builder indexBuilder(String index) { return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index d911a1175c7..da6f270a79d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -109,7 +109,12 @@ public class PrimaryAllocationIT extends ESIntegTestCase { logger.info("--> check that old primary shard does not get promoted to primary again"); // kick reroute and wait for all shard states to be fetched client(master).admin().cluster().prepareReroute().get(); - assertBusy(() -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(), equalTo(0))); + assertBusy(new Runnable() { + @Override + public void run() { + assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(), equalTo(0)); + } + }); // kick reroute a second time and check that all shards are unassigned assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), equalTo(2)); } @@ -158,7 +163,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase { for (IntObjectCursor> shardStoreStatuses : storeStatuses) { int shardId = shardStoreStatuses.key; IndicesShardStoresResponse.StoreStatus storeStatus = randomFrom(shardStoreStatuses.value); - logger.info("--> adding allocation command for shard " + shardId); + logger.info("--> adding allocation command for shard {}", shardId); 
// force allocation based on node id if (useStaleReplica) { rerouteBuilder.add(new AllocateStalePrimaryAllocationCommand("test", shardId, storeStatus.getNode().getId(), true)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java index 5c922f07e46..d35b896f705 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingServiceTests.java @@ -27,10 +27,10 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESAllocationTestCase; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -42,6 +42,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.singletonMap; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.equalTo; /** @@ -140,7 +142,7 @@ public class RoutingServiceTests extends ESAllocationTestCase { */ public void testDelayedUnassignedScheduleRerouteAfterDelayedReroute() throws Exception { final ThreadPool testThreadPool = new ThreadPool(getTestName()); - + ClusterService clusterService = createClusterService(testThreadPool); try { MockAllocationService allocation = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator()); MetaData metaData = MetaData.builder() @@ -152,8 +154,8 @@ public class RoutingServiceTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("short_delay")).addAsNew(metaData.index("long_delay")).build()) .nodes(DiscoveryNodes.builder() - .put(newNode("node0", singletonMap("data", Boolean.FALSE.toString()))).localNodeId("node0").masterNodeId("node0") - .put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build(); + .put(newNode("node0", singletonMap("data", Boolean.FALSE.toString()))).localNodeId("node0").masterNodeId("node0") + .put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build(); // allocate shards clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); // start primaries @@ -209,8 +211,7 @@ public class RoutingServiceTests extends ESAllocationTestCase { // manually trigger a clusterChanged event on routingService ClusterState newState = clusterState; - // create fake cluster service - TestClusterService clusterService = new TestClusterService(newState, testThreadPool); + setState(clusterService, newState); // create routing service, also registers listener on cluster service RoutingService routingService = new RoutingService(Settings.EMPTY, 
testThreadPool, clusterService, allocation); routingService.start(); // just so performReroute does not prematurely return @@ -221,11 +222,12 @@ public class RoutingServiceTests extends ESAllocationTestCase { clusterService.addLast(event -> latch.countDown()); // instead of clusterService calling clusterChanged, we call it directly here routingService.clusterChanged(new ClusterChangedEvent("test", newState, prevState)); - // cluster service should have updated state and called routingService with clusterChanged + // cluster service should have updated state and called routingService with clusterChanged latch.await(); // verify the registration has been set to the delay of longDelayReplica/longDelayUnassignedReplica assertThat(routingService.getMinDelaySettingAtLastSchedulingNanos(), equalTo(TimeValue.timeValueSeconds(10).nanos())); } finally { + clusterService.stop(); terminate(testThreadPool); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java index 741d62d74e6..40e24338f00 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java @@ -63,7 +63,7 @@ public class RoutingTableTests extends ESAllocationTestCase { this.numberOfReplicas = randomIntBetween(1, 5); this.shardsPerIndex = this.numberOfShards * (this.numberOfReplicas + 1); this.totalNumberOfShards = this.shardsPerIndex * 2; - logger.info("Setup test with " + this.numberOfShards + " shards and " + this.numberOfReplicas + " replicas."); + logger.info("Setup test with {} shards and {} replicas.", this.numberOfShards, this.numberOfReplicas); this.emptyRoutingTable = new RoutingTable.Builder().build(); MetaData metaData = MetaData.builder() .put(createIndexMetaData(TEST_INDEX_1)) @@ -81,7 +81,7 @@ public class RoutingTableTests extends ESAllocationTestCase { * puts primary shard routings into initializing state */ private void initPrimaries() { - logger.info("adding " + (this.numberOfReplicas + 1) + " nodes and performing rerouting"); + logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1); Builder discoBuilder = DiscoveryNodes.builder(); for (int i = 0; i < this.numberOfReplicas + 1; i++) { discoBuilder = discoBuilder.put(newNode("node" + i)); @@ -95,7 +95,7 @@ public class RoutingTableTests extends ESAllocationTestCase { private void startInitializingShards(String index) { this.clusterState = ClusterState.builder(clusterState).routingTable(this.testRoutingTable).build(); - logger.info("start primary shards for index " + index); + logger.info("start primary shards for index {}", index); RoutingAllocation.Result rerouteResult = ALLOCATION_SERVICE.applyStartedShards(this.clusterState, this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING)); this.clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); this.testRoutingTable = rerouteResult.routingTable(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index 91ba1f4999c..1c5f77ce408 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -301,7 +301,7 @@ public class 
AddIncrementallyTests extends ESAllocationTestCase { RoutingTable routingTable = routingTableBuilder.build(); - logger.info("start " + numberOfNodes + " nodes"); + logger.info("start {} nodes", numberOfNodes); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); for (int i = 0; i < numberOfNodes; i++) { nodes.put(newNode("node" + i)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java index 8810fc47395..18f24504619 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -221,18 +221,10 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); - for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(INITIALIZING)) { - logger.info(shard.toString()); - } - for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(STARTED)) { - logger.info(shard.toString()); - } - for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(RELOCATING)) { - logger.info(shard.toString()); - } - for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) { - logger.info(shard.toString()); - } + logger.info("Initializing shards: {}", clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + logger.info("Started shards: {}", clusterState.getRoutingNodes().shardsWithState(STARTED)); + logger.info("Relocating shards: {}", clusterState.getRoutingNodes().shardsWithState(RELOCATING)); + logger.info("Unassigned shards: {}", clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index 707129578c9..56a66b52d6f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; -import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; @@ -311,29 +310,9 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { public void testNoRebalanceOnPrimaryOverload() { Settings.Builder settings = settingsBuilder(); AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(), - new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), getRandom()), new ShardsAllocators(settings.build(), + new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, 
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), getRandom()), NoopGatewayAllocator.INSTANCE, new ShardsAllocator() { - @Override - public boolean rebalance(RoutingAllocation allocation) { - return false; - } - - @Override - public boolean moveShards(RoutingAllocation allocation) { - return false; - } - - @Override - public void applyStartedShards(StartedRerouteAllocation allocation) { - - - } - - @Override - public void applyFailedShards(FailedRerouteAllocation allocation) { - } - /* * // this allocator tries to rebuild this scenario where a rebalance is * // triggered solely by the primary overload on node [1] where a shard @@ -354,9 +333,8 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { --------[test][2], node[3], [P], s[STARTED] --------[test][3], node[3], [P], s[STARTED] ---- unassigned - */ - @Override - public boolean allocateUnassigned(RoutingAllocation allocation) { + */ + public boolean allocate(RoutingAllocation allocation) { RoutingNodes.UnassignedShards unassigned = allocation.routingNodes().unassigned(); boolean changed = !unassigned.isEmpty(); ShardRouting[] drain = unassigned.drain(); @@ -403,7 +381,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { } return changed; } - }), EmptyClusterInfoService.INSTANCE); + }, EmptyClusterInfoService.INSTANCE); MetaData.Builder metaDataBuilder = MetaData.builder(); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); IndexMetaData.Builder indexMeta = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java index 1ba0c063255..be403510195 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java @@ -147,12 +147,12 @@ public abstract class CatAllocationTestCase extends ESAllocationTestCase { if (initializing.isEmpty()) { break; } - logger.debug(initializing.toString()); + logger.debug("Initializing shards: {}", initializing); numRelocations += initializing.size(); routingTable = strategy.applyStartedShards(clusterState, initializing).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); } - logger.debug("--> num relocations to get balance: " + numRelocations); + logger.debug("--> num relocations to get balance: {}", numRelocations); return clusterState; } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 4e5be0f26b7..3ec8df5cea6 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -36,7 +36,7 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; -import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import 
org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; @@ -333,7 +333,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {new NodeVersionAllocationDecider(Settings.EMPTY)}); AllocationService strategy = new MockAllocationService(Settings.EMPTY, allocationDeciders, - new ShardsAllocators(Settings.EMPTY, NoopGatewayAllocator.INSTANCE), EmptyClusterInfoService.INSTANCE); + NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); RoutingAllocation.Result result = strategy.reroute(state, new AllocationCommands(), true); // the two indices must stay as is, the replicas cannot move to oldNode2 because versions don't match state = ClusterState.builder(state).routingResult(result).build(); @@ -363,7 +363,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { new NodeVersionAllocationDecider(Settings.EMPTY)}); AllocationService strategy = new MockAllocationService(Settings.EMPTY, allocationDeciders, - new ShardsAllocators(Settings.EMPTY, NoopGatewayAllocator.INSTANCE), EmptyClusterInfoService.INSTANCE); + NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); RoutingAllocation.Result result = strategy.reroute(state, new AllocationCommands(), true); // Make sure that primary shards are only allocated on the new node @@ -409,14 +409,16 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { String fromId = r.currentNodeId(); assertThat(fromId, notNullValue()); assertThat(toId, notNullValue()); - logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version()); + logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().version(), + toId, routingNodes.node(toId).node().version()); assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version())); } else { ShardRouting primary = routingNodes.activePrimary(r); assertThat(primary, notNullValue()); String fromId = primary.currentNodeId(); String toId = r.relocatingNodeId(); - logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version()); + logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().version(), + toId, routingNodes.node(toId).node().version()); assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version())); } } @@ -428,7 +430,8 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { assertThat(primary, notNullValue()); String fromId = primary.currentNodeId(); String toId = r.currentNodeId(); - logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version()); + logger.trace("From: {} with Version: {} to: {} with Version: {}", fromId, routingNodes.node(fromId).node().version(), + toId, routingNodes.node(toId).node().version()); 
assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version())); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java index abc561a0916..0bdab7a1158 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java @@ -30,7 +30,7 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; @@ -59,7 +59,7 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase { RandomAllocationDecider randomAllocationDecider = new RandomAllocationDecider(getRandom()); AllocationService strategy = new AllocationService(settingsBuilder().build(), new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY), new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY), - randomAllocationDecider))), new ShardsAllocators(NoopGatewayAllocator.INSTANCE), EmptyClusterInfoService.INSTANCE); + randomAllocationDecider))), NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); int indices = scaledRandomIntBetween(1, 20); Builder metaBuilder = MetaData.builder(); int maxNumReplicas = 1; diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java index 0830747a9dd..e220c8eb0f6 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java @@ -212,7 +212,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { assertThat(shardRouting.getIndexName(), equalTo("test1")); } - logger.info("update " + ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey() + " for test, see that things move"); + logger.info("update {} for test, see that things move", ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey()); metaData = MetaData.builder(metaData) .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5) diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 0855263dd06..928756fec01 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -39,7 +39,7 @@ import 
org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; @@ -65,10 +65,6 @@ import static org.hamcrest.Matchers.nullValue; public class DiskThresholdDeciderTests extends ESAllocationTestCase { - private static ShardsAllocators makeShardsAllocators() { - return new ShardsAllocators(NoopGatewayAllocator.INSTANCE); - } - public void testDiskThreshold() { Settings diskSettings = settingsBuilder() .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) @@ -109,7 +105,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, makeShardsAllocators(), cis); + .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis); MetaData metaData = MetaData.builder() .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) @@ -194,7 +190,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, makeShardsAllocators(), cis); + .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -225,7 +221,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, makeShardsAllocators(), cis); + .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -305,7 +301,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, makeShardsAllocators(), cis); + .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis); MetaData metaData = 
MetaData.builder() .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) @@ -362,7 +358,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, makeShardsAllocators(), cis); + .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -429,7 +425,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, makeShardsAllocators(), cis); + .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -460,7 +456,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, makeShardsAllocators(), cis); + .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -569,7 +565,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, makeShardsAllocators(), cis); + .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis); MetaData metaData = MetaData.builder() .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) @@ -637,7 +633,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, makeShardsAllocators(), cis); + .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis); MetaData metaData = MetaData.builder() .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) @@ -740,7 +736,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put("cluster.routing.allocation.node_concurrent_recoveries", 10) 
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, makeShardsAllocators(), cis); + .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis); MetaData metaData = MetaData.builder() .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) @@ -902,7 +898,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, makeShardsAllocators(), cis); + .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis); // Ensure that the reroute call doesn't alter the routing table, since the first primary is relocating away // and therefore we will have sufficient disk space on node1. RoutingAllocation.Result result = strategy.reroute(clusterState, "reroute"); @@ -1003,7 +999,7 @@ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) - .build(), deciders, makeShardsAllocators(), cis); + .build(), deciders, NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(Settings.EMPTY), cis); RoutingAllocation.Result result = strategy.reroute(clusterState, "reroute"); assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED)); diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java new file mode 100644 index 00000000000..cce3a873b7b --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -0,0 +1,652 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.cluster.service; + +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalNodeMasterListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.component.LifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.inject.Singleton; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +/** + * + */ +@ClusterScope(scope = Scope.TEST, numDataNodes = 0) +@ESIntegTestCase.SuppressLocalMode +public class ClusterServiceIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return pluginList(TestPlugin.class); + } + + public void testAckedUpdateTask() throws Exception { + Settings settings = settingsBuilder() + .put("discovery.type", "local") + .build(); + internalCluster().startNode(settings); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + + final AtomicBoolean allNodesAcked = new AtomicBoolean(false); + final AtomicBoolean ackTimeout = new AtomicBoolean(false); + final AtomicBoolean onFailure = new AtomicBoolean(false); + final AtomicBoolean executed = new AtomicBoolean(false); + final CountDownLatch latch = new CountDownLatch(1); + final CountDownLatch processedLatch = new CountDownLatch(1); + clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { + @Override + protected Void newResponse(boolean acknowledged) { + return null; + } + + @Override + public boolean mustAck(DiscoveryNode discoveryNode) { + return true; + } + + @Override + public void onAllNodesAcked(@Nullable Throwable t) { + allNodesAcked.set(true); + latch.countDown(); + } + + @Override + public void onAckTimeout() { + ackTimeout.set(true); + latch.countDown(); + } + + @Override + public TimeValue ackTimeout() { + return TimeValue.timeValueSeconds(10); + } + + @Override + public TimeValue timeout() { + return TimeValue.timeValueSeconds(10); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, 
ClusterState newState) { + processedLatch.countDown(); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + executed.set(true); + return ClusterState.builder(currentState).build(); + } + + @Override + public void onFailure(String source, Throwable t) { + logger.error("failed to execute callback in test {}", t, source); + onFailure.set(true); + latch.countDown(); + } + }); + + ensureGreen(); + assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true)); + + assertThat(allNodesAcked.get(), equalTo(true)); + assertThat(ackTimeout.get(), equalTo(false)); + assertThat(executed.get(), equalTo(true)); + assertThat(onFailure.get(), equalTo(false)); + + assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true)); + } + + public void testAckedUpdateTaskSameClusterState() throws Exception { + Settings settings = settingsBuilder() + .put("discovery.type", "local") + .build(); + internalCluster().startNode(settings); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + + final AtomicBoolean allNodesAcked = new AtomicBoolean(false); + final AtomicBoolean ackTimeout = new AtomicBoolean(false); + final AtomicBoolean onFailure = new AtomicBoolean(false); + final AtomicBoolean executed = new AtomicBoolean(false); + final CountDownLatch latch = new CountDownLatch(1); + final CountDownLatch processedLatch = new CountDownLatch(1); + clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { + @Override + protected Void newResponse(boolean acknowledged) { + return null; + } + + @Override + public void onAllNodesAcked(@Nullable Throwable t) { + allNodesAcked.set(true); + latch.countDown(); + } + + @Override + public void onAckTimeout() { + ackTimeout.set(true); + latch.countDown(); + } + + @Override + public TimeValue ackTimeout() { + return TimeValue.timeValueSeconds(10); + } + + @Override + public TimeValue timeout() { + return TimeValue.timeValueSeconds(10); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + processedLatch.countDown(); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + executed.set(true); + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + logger.error("failed to execute callback in test {}", t, source); + onFailure.set(true); + latch.countDown(); + } + }); + + ensureGreen(); + assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true)); + + assertThat(allNodesAcked.get(), equalTo(true)); + assertThat(ackTimeout.get(), equalTo(false)); + assertThat(executed.get(), equalTo(true)); + assertThat(onFailure.get(), equalTo(false)); + + assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true)); + } + + public void testAckedUpdateTaskNoAckExpected() throws Exception { + Settings settings = settingsBuilder() + .put("discovery.type", "local") + .build(); + internalCluster().startNode(settings); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + + final AtomicBoolean allNodesAcked = new AtomicBoolean(false); + final AtomicBoolean ackTimeout = new AtomicBoolean(false); + final AtomicBoolean onFailure = new AtomicBoolean(false); + final AtomicBoolean executed = new AtomicBoolean(false); + final CountDownLatch latch = new CountDownLatch(1); + clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { + @Override + protected Void newResponse(boolean 
acknowledged) { + return null; + } + + @Override + public boolean mustAck(DiscoveryNode discoveryNode) { + return false; + } + + @Override + public void onAllNodesAcked(@Nullable Throwable t) { + allNodesAcked.set(true); + latch.countDown(); + } + + @Override + public void onAckTimeout() { + ackTimeout.set(true); + latch.countDown(); + } + + @Override + public TimeValue ackTimeout() { + return TimeValue.timeValueSeconds(10); + } + + @Override + public TimeValue timeout() { + return TimeValue.timeValueSeconds(10); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + executed.set(true); + return ClusterState.builder(currentState).build(); + } + + @Override + public void onFailure(String source, Throwable t) { + logger.error("failed to execute callback in test {}", t, source); + onFailure.set(true); + latch.countDown(); + } + }); + + ensureGreen(); + assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true)); + + assertThat(allNodesAcked.get(), equalTo(true)); + assertThat(ackTimeout.get(), equalTo(false)); + assertThat(executed.get(), equalTo(true)); + assertThat(onFailure.get(), equalTo(false)); + } + + public void testAckedUpdateTaskTimeoutZero() throws Exception { + Settings settings = settingsBuilder() + .put("discovery.type", "local") + .build(); + internalCluster().startNode(settings); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + + final AtomicBoolean allNodesAcked = new AtomicBoolean(false); + final AtomicBoolean ackTimeout = new AtomicBoolean(false); + final AtomicBoolean onFailure = new AtomicBoolean(false); + final AtomicBoolean executed = new AtomicBoolean(false); + final CountDownLatch latch = new CountDownLatch(1); + final CountDownLatch processedLatch = new CountDownLatch(1); + clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { + @Override + protected Void newResponse(boolean acknowledged) { + return null; + } + + @Override + public boolean mustAck(DiscoveryNode discoveryNode) { + return false; + } + + @Override + public void onAllNodesAcked(@Nullable Throwable t) { + allNodesAcked.set(true); + latch.countDown(); + } + + @Override + public void onAckTimeout() { + ackTimeout.set(true); + latch.countDown(); + } + + @Override + public TimeValue ackTimeout() { + return TimeValue.timeValueSeconds(0); + } + + @Override + public TimeValue timeout() { + return TimeValue.timeValueSeconds(10); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + processedLatch.countDown(); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + executed.set(true); + return ClusterState.builder(currentState).build(); + } + + @Override + public void onFailure(String source, Throwable t) { + logger.error("failed to execute callback in test {}", t, source); + onFailure.set(true); + latch.countDown(); + } + }); + + ensureGreen(); + assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true)); + + assertThat(allNodesAcked.get(), equalTo(false)); + assertThat(ackTimeout.get(), equalTo(true)); + assertThat(executed.get(), equalTo(true)); + assertThat(onFailure.get(), equalTo(false)); + + assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true)); + } + + @TestLogging("_root:debug,action.admin.cluster.tasks:trace") + public void testPendingUpdateTask() throws 
Exception { + Settings settings = settingsBuilder() + .put("discovery.type", "local") + .build(); + String node_0 = internalCluster().startNode(settings); + internalCluster().startNodeClient(settings); + + final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node_0); + final CountDownLatch block1 = new CountDownLatch(1); + final CountDownLatch invoked1 = new CountDownLatch(1); + clusterService.submitStateUpdateTask("1", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + invoked1.countDown(); + try { + block1.await(); + } catch (InterruptedException e) { + fail(); + } + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + invoked1.countDown(); + fail(); + } + }); + invoked1.await(); + final CountDownLatch invoked2 = new CountDownLatch(9); + for (int i = 2; i <= 10; i++) { + clusterService.submitStateUpdateTask(Integer.toString(i), new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + invoked2.countDown(); + } + }); + } + + // there might be other tasks in this node, make sure to only take the ones we add into account in this test + + // The tasks can be re-ordered, so we need to check out-of-order + Set controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10")); + List pendingClusterTasks = clusterService.pendingTasks(); + assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(10)); + assertThat(pendingClusterTasks.get(0).getSource().string(), equalTo("1")); + assertThat(pendingClusterTasks.get(0).isExecuting(), equalTo(true)); + for (PendingClusterTask task : pendingClusterTasks) { + controlSources.remove(task.getSource().string()); + } + assertTrue(controlSources.isEmpty()); + + controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10")); + PendingClusterTasksResponse response = internalCluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().get(); + assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(10)); + assertThat(response.pendingTasks().get(0).getSource().string(), equalTo("1")); + assertThat(response.pendingTasks().get(0).isExecuting(), equalTo(true)); + for (PendingClusterTask task : response) { + controlSources.remove(task.getSource().string()); + } + assertTrue(controlSources.isEmpty()); + block1.countDown(); + invoked2.await(); + + // whenever we test for no tasks, we need to awaitBusy since this is a live node + assertTrue(awaitBusy(() -> clusterService.pendingTasks().isEmpty())); + waitNoPendingTasksOnAll(); + + final CountDownLatch block2 = new CountDownLatch(1); + final CountDownLatch invoked3 = new CountDownLatch(1); + clusterService.submitStateUpdateTask("1", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + invoked3.countDown(); + try { + block2.await(); + } catch (InterruptedException e) { + fail(); + } + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + invoked3.countDown(); + fail(); + } + }); + invoked3.await(); + + for (int i = 2; i <= 5; i++) { + clusterService.submitStateUpdateTask(Integer.toString(i), new ClusterStateUpdateTask() { + @Override + public 
ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + } + Thread.sleep(100); + + pendingClusterTasks = clusterService.pendingTasks(); + assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(5)); + controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5")); + for (PendingClusterTask task : pendingClusterTasks) { + controlSources.remove(task.getSource().string()); + } + assertTrue(controlSources.isEmpty()); + + response = internalCluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().get(); + assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(5)); + controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5")); + for (PendingClusterTask task : response) { + if (controlSources.remove(task.getSource().string())) { + assertThat(task.getTimeInQueueInMillis(), greaterThan(0L)); + } + } + assertTrue(controlSources.isEmpty()); + block2.countDown(); + } + + public void testLocalNodeMasterListenerCallbacks() throws Exception { + Settings settings = settingsBuilder() + .put("discovery.type", "zen") + .put("discovery.zen.minimum_master_nodes", 1) + .put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms") + .put("discovery.initial_state_timeout", "500ms") + .build(); + + String node_0 = internalCluster().startNode(settings); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + MasterAwareService testService = internalCluster().getInstance(MasterAwareService.class); + + ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) + .setWaitForNodes("1").get(); + assertThat(clusterHealth.isTimedOut(), equalTo(false)); + + // the first node should be a master as the minimum required is 1 + assertThat(clusterService.state().nodes().masterNode(), notNullValue()); + assertThat(clusterService.state().nodes().localNodeMaster(), is(true)); + assertThat(testService.master(), is(true)); + + String node_1 = internalCluster().startNode(settings); + final ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class, node_1); + MasterAwareService testService1 = internalCluster().getInstance(MasterAwareService.class, node_1); + + clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get(); + assertThat(clusterHealth.isTimedOut(), equalTo(false)); + + // the second node should not be the master as node1 is already the master. 
+ assertThat(clusterService1.state().nodes().localNodeMaster(), is(false)); + assertThat(testService1.master(), is(false)); + + internalCluster().stopCurrentMasterNode(); + clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("1").get(); + assertThat(clusterHealth.isTimedOut(), equalTo(false)); + + // now that node0 is closed, node1 should be elected as master + assertThat(clusterService1.state().nodes().localNodeMaster(), is(true)); + assertThat(testService1.master(), is(true)); + + // start another node and set minimum_master_nodes to 2 + internalCluster().startNode(Settings.builder().put(settings)); + assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); + + Settings transientSettings = settingsBuilder() + .put("discovery.zen.minimum_master_nodes", 2) + .build(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(transientSettings).get(); + + // and shut down the second node + internalCluster().stopRandomNonMasterNode(); + + // there should not be any master as the minimum number of required eligible masters is not met + awaitBusy(() -> clusterService1.state().nodes().masterNode() == null && + clusterService1.state().status() == ClusterState.ClusterStateStatus.APPLIED); + assertThat(testService1.master(), is(false)); + + // bring the node back up + String node_2 = internalCluster().startNode(Settings.builder().put(settings).put(transientSettings)); + ClusterService clusterService2 = internalCluster().getInstance(ClusterService.class, node_2); + MasterAwareService testService2 = internalCluster().getInstance(MasterAwareService.class, node_2); + + // make sure both nodes see each other otherwise the master node below could be null if node 2 is master and node 1 didn't receive + // the updated cluster state...
+ assertThat(internalCluster().client(node_1).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true) + .setWaitForNodes("2").get().isTimedOut(), is(false)); + assertThat(internalCluster().client(node_2).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true) + .setWaitForNodes("2").get().isTimedOut(), is(false)); + + // now that we started node1 again, a new master should be elected + assertThat(clusterService2.state().nodes().masterNode(), is(notNullValue())); + if (node_2.equals(clusterService2.state().nodes().masterNode().name())) { + assertThat(testService1.master(), is(false)); + assertThat(testService2.master(), is(true)); + } else { + assertThat(testService1.master(), is(true)); + assertThat(testService2.master(), is(false)); + } + } + + public static class TestPlugin extends Plugin { + + @Override + public String name() { + return "test plugin"; + } + + @Override + public String description() { + return "test plugin"; + } + + @Override + public Collection> nodeServices() { + List> services = new ArrayList<>(1); + services.add(MasterAwareService.class); + return services; + } + } + + @Singleton + public static class MasterAwareService extends AbstractLifecycleComponent implements LocalNodeMasterListener { + + private final ClusterService clusterService; + private volatile boolean master; + + @Inject + public MasterAwareService(Settings settings, ClusterService clusterService) { + super(settings); + clusterService.add(this); + this.clusterService = clusterService; + logger.info("initialized test service"); + } + + @Override + public void onMaster() { + logger.info("on master [{}]", clusterService.localNode()); + master = true; + } + + @Override + public void offMaster() { + logger.info("off master [{}]", clusterService.localNode()); + master = false; + } + + public boolean master() { + return master; + } + + @Override + protected void doStart() { + } + + @Override + protected void doStop() { + } + + @Override + protected void doClose() { + } + + @Override + public String executorName() { + return ThreadPool.Names.SAME; + } + + } +} diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java new file mode 100644 index 00000000000..a62f99ab459 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java @@ -0,0 +1,804 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.cluster.service; + +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.NodeConnectionsService; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.OperationRouting; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; +import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class ClusterServiceTests extends ESTestCase { + + static ThreadPool threadPool; + TimedClusterService clusterService; + + @BeforeClass + public static void createThreadPool() { + threadPool = new ThreadPool(ClusterServiceTests.class.getName()); + } + + @AfterClass + public static void stopThreadPool() { + if (threadPool != null) { + threadPool.shutdownNow(); + threadPool = null; + } + } + + @Before + public void setUp() throws Exception { + super.setUp(); + clusterService = createTimedClusterService(true); + } + + @After + public void tearDown() throws Exception { + clusterService.close(); + super.tearDown(); + } + + TimedClusterService createTimedClusterService(boolean makeMaster) throws InterruptedException { + TimedClusterService timedClusterService = new TimedClusterService(Settings.EMPTY, null, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool, new ClusterName("ClusterServiceTests")); + timedClusterService.setLocalNode(new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT)); + timedClusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { + @Override + public void connectToAddedNodes(ClusterChangedEvent event) { + // skip + } + + @Override + 
public void disconnectFromRemovedNodes(ClusterChangedEvent event) { + // skip + } + }); + timedClusterService.setClusterStatePublisher((event, ackListener) -> { + }); + timedClusterService.start(); + ClusterState state = timedClusterService.state(); + final DiscoveryNodes nodes = state.nodes(); + final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes) + .masterNodeId(makeMaster ? nodes.localNodeId() : null); + state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) + .nodes(nodesBuilder).build(); + setState(timedClusterService, state); + return timedClusterService; + } + + public void testTimeoutUpdateTask() throws Exception { + final CountDownLatch block = new CountDownLatch(1); + clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + try { + block.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + throw new RuntimeException(t); + } + }); + + final CountDownLatch timedOut = new CountDownLatch(1); + final AtomicBoolean executeCalled = new AtomicBoolean(); + clusterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + @Override + public TimeValue timeout() { + return TimeValue.timeValueMillis(2); + } + + @Override + public void onFailure(String source, Throwable t) { + timedOut.countDown(); + } + + @Override + public ClusterState execute(ClusterState currentState) { + executeCalled.set(true); + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + } + }); + + timedOut.await(); + block.countDown(); + final CountDownLatch allProcessed = new CountDownLatch(1); + clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + @Override + public void onFailure(String source, Throwable t) { + throw new RuntimeException(t); + } + + @Override + public ClusterState execute(ClusterState currentState) { + allProcessed.countDown(); + return currentState; + } + + }); + allProcessed.await(); // executed another task to double check that execute on the timed out update task is not called... 
+ assertThat(executeCalled.get(), equalTo(false)); + } + + + public void testMasterAwareExecution() throws Exception { + ClusterService nonMaster = createTimedClusterService(false); + + final boolean[] taskFailed = {false}; + final CountDownLatch latch1 = new CountDownLatch(1); + nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + latch1.countDown(); + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + taskFailed[0] = true; + latch1.countDown(); + } + }); + + latch1.await(); + assertTrue("cluster state update task was executed on a non-master", taskFailed[0]); + + taskFailed[0] = true; + final CountDownLatch latch2 = new CountDownLatch(1); + nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + @Override + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + taskFailed[0] = false; + latch2.countDown(); + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + taskFailed[0] = true; + latch2.countDown(); + } + }); + latch2.await(); + assertFalse("non-master cluster state update task was not executed", taskFailed[0]); + + nonMaster.close(); + } + + /* + * test that a listener throwing an exception while handling a + * notification does not prevent publication notification to the + * executor + */ + public void testClusterStateTaskListenerThrowingExceptionIsOkay() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + AtomicBoolean published = new AtomicBoolean(); + + clusterService.submitStateUpdateTask( + "testClusterStateTaskListenerThrowingExceptionIsOkay", + new Object(), + ClusterStateTaskConfig.build(Priority.NORMAL), + new ClusterStateTaskExecutor<Object>() { + @Override + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public BatchResult<Object> execute(ClusterState currentState, List<Object> tasks) throws Exception { + ClusterState newClusterState = ClusterState.builder(currentState).build(); + return BatchResult.builder().successes(tasks).build(newClusterState); + } + + @Override + public void clusterStatePublished(ClusterState newClusterState) { + published.set(true); + latch.countDown(); + } + }, + new ClusterStateTaskListener() { + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + throw new IllegalStateException(source); + } + + @Override + public void onFailure(String source, Throwable t) { + } + } + ); + + latch.await(); + assertTrue(published.get()); + } + + // test that for a single thread, tasks are executed in the order + // that they are submitted + public void testClusterStateUpdateTasksAreExecutedInOrder() throws BrokenBarrierException, InterruptedException { + class TaskExecutor implements ClusterStateTaskExecutor<Integer> { + List<Integer> tasks = new ArrayList<>(); + + @Override + public BatchResult<Integer> execute(ClusterState currentState, List<Integer> tasks) throws Exception { + this.tasks.addAll(tasks); + return BatchResult.builder().successes(tasks).build(ClusterState.builder(currentState).build()); + } + + @Override + public boolean runOnlyOnMaster() { + return false; + } + } + + int numberOfThreads = randomIntBetween(2, 8); + TaskExecutor[] executors = new TaskExecutor[numberOfThreads]; + for (int i = 0; i < numberOfThreads; i++) { + executors[i] = new TaskExecutor(); + } + + int tasksSubmittedPerThread =
randomIntBetween(2, 1024); + + CopyOnWriteArrayList<Tuple<String, Throwable>> failures = new CopyOnWriteArrayList<>(); + CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); + + ClusterStateTaskListener listener = new ClusterStateTaskListener() { + @Override + public void onFailure(String source, Throwable t) { + logger.error("unexpected failure: [{}]", t, source); + failures.add(new Tuple<>(source, t)); + updateLatch.countDown(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + updateLatch.countDown(); + } + }; + + CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); + + for (int i = 0; i < numberOfThreads; i++) { + final int index = i; + Thread thread = new Thread(() -> { + try { + barrier.await(); + for (int j = 0; j < tasksSubmittedPerThread; j++) { + clusterService.submitStateUpdateTask("[" + index + "][" + j + "]", j, + ClusterStateTaskConfig.build(randomFrom(Priority.values())), executors[index], listener); + } + barrier.await(); + } catch (InterruptedException | BrokenBarrierException e) { + throw new AssertionError(e); + } + }); + thread.start(); + } + + // wait for all threads to be ready + barrier.await(); + // wait for all threads to finish + barrier.await(); + + updateLatch.await(); + + assertThat(failures, empty()); + + for (int i = 0; i < numberOfThreads; i++) { + assertEquals(tasksSubmittedPerThread, executors[i].tasks.size()); + for (int j = 0; j < tasksSubmittedPerThread; j++) { + assertNotNull(executors[i].tasks.get(j)); + assertEquals("cluster state update task executed out of order", j, (int) executors[i].tasks.get(j)); + } + } + } + + public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException { + AtomicInteger counter = new AtomicInteger(); + class Task { + private AtomicBoolean state = new AtomicBoolean(); + + public void execute() { + if (!state.compareAndSet(false, true)) { + throw new IllegalStateException(); + } else { + counter.incrementAndGet(); + } + } + } + + int numberOfThreads = randomIntBetween(2, 8); + int tasksSubmittedPerThread = randomIntBetween(1, 1024); + int numberOfExecutors = Math.max(1, numberOfThreads / 4); + final Semaphore semaphore = new Semaphore(numberOfExecutors); + + class TaskExecutor implements ClusterStateTaskExecutor<Task> { + private AtomicInteger counter = new AtomicInteger(); + private AtomicInteger batches = new AtomicInteger(); + private AtomicInteger published = new AtomicInteger(); + + @Override + public BatchResult<Task> execute(ClusterState currentState, List<Task> tasks) throws Exception { + tasks.forEach(task -> task.execute()); + counter.addAndGet(tasks.size()); + ClusterState maybeUpdatedClusterState = currentState; + if (randomBoolean()) { + maybeUpdatedClusterState = ClusterState.builder(currentState).build(); + batches.incrementAndGet(); + semaphore.acquire(); + } + return BatchResult.builder().successes(tasks).build(maybeUpdatedClusterState); + } + + @Override + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public void clusterStatePublished(ClusterState newClusterState) { + published.incrementAndGet(); + semaphore.release(); + } + } + + ConcurrentMap<String, AtomicInteger> counters = new ConcurrentHashMap<>(); + CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); + ClusterStateTaskListener listener = new ClusterStateTaskListener() { + @Override + public void onFailure(String source, Throwable t) { + assert false; + } + + @Override + public void
clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + counters.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet(); + updateLatch.countDown(); + } + }; + + List<TaskExecutor> executors = new ArrayList<>(); + for (int i = 0; i < numberOfExecutors; i++) { + executors.add(new TaskExecutor()); + } + + // randomly assign tasks to executors + List<TaskExecutor> assignments = new ArrayList<>(); + for (int i = 0; i < numberOfThreads; i++) { + for (int j = 0; j < tasksSubmittedPerThread; j++) { + assignments.add(randomFrom(executors)); + } + } + + Map<TaskExecutor, Integer> counts = new HashMap<>(); + for (TaskExecutor executor : assignments) { + counts.merge(executor, 1, (previous, one) -> previous + one); + } + + CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); + for (int i = 0; i < numberOfThreads; i++) { + final int index = i; + Thread thread = new Thread(() -> { + try { + barrier.await(); + for (int j = 0; j < tasksSubmittedPerThread; j++) { + ClusterStateTaskExecutor<Task> executor = assignments.get(index * tasksSubmittedPerThread + j); + clusterService.submitStateUpdateTask( + Thread.currentThread().getName(), + new Task(), + ClusterStateTaskConfig.build(randomFrom(Priority.values())), + executor, + listener); + } + barrier.await(); + } catch (BrokenBarrierException | InterruptedException e) { + throw new AssertionError(e); + } + }); + thread.start(); + } + + // wait for all threads to be ready + barrier.await(); + // wait for all threads to finish + barrier.await(); + + // wait until all the cluster state updates have been processed + updateLatch.await(); + // and until all of the publication callbacks have completed + semaphore.acquire(numberOfExecutors); + + // assert the number of executed tasks is correct + assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get()); + + // assert each executor executed the correct number of tasks + for (TaskExecutor executor : executors) { + if (counts.containsKey(executor)) { + assertEquals((int) counts.get(executor), executor.counter.get()); + assertEquals(executor.batches.get(), executor.published.get()); + } + } + + // assert the correct number of clusterStateProcessed events were triggered + for (Map.Entry<String, AtomicInteger> entry : counters.entrySet()) { + assertEquals(entry.getValue().get(), tasksSubmittedPerThread); + } + } + + /** + * Note, this test can only work as long as we have a single thread executor executing the state update tasks!
+ */ + public void testPrioritizedTasks() throws Exception { + Settings settings = settingsBuilder() + .put("discovery.type", "local") + .build(); + BlockingTask block = new BlockingTask(Priority.IMMEDIATE); + clusterService.submitStateUpdateTask("test", block); + int taskCount = randomIntBetween(5, 20); + Priority[] priorities = Priority.values(); + + // will hold all the tasks in the order in which they were executed + List<PrioritizedTask> tasks = new ArrayList<>(taskCount); + CountDownLatch latch = new CountDownLatch(taskCount); + for (int i = 0; i < taskCount; i++) { + Priority priority = priorities[randomIntBetween(0, priorities.length - 1)]; + clusterService.submitStateUpdateTask("test", new PrioritizedTask(priority, latch, tasks)); + } + + block.release(); + latch.await(); + + Priority prevPriority = null; + for (PrioritizedTask task : tasks) { + if (prevPriority == null) { + prevPriority = task.priority(); + } else { + assertThat(task.priority().sameOrAfter(prevPriority), is(true)); + } + } + } + + @TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level + public void testClusterStateUpdateLogging() throws Exception { + MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test1", "cluster.service", Level.DEBUG, + "*processing [test1]: took [1s] no change in cluster_state")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.TRACE, + "*failed to execute cluster state update in [2s]*")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG, + "*processing [test3]: took [3s] done applying updated cluster_state (version: *, uuid: *)")); + + Logger rootLogger = Logger.getRootLogger(); + rootLogger.addAppender(mockAppender); + try { + final CountDownLatch latch = new CountDownLatch(4); + clusterService.currentTimeOverride = System.nanoTime(); + clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos(); + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + clusterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(2).nanos(); + throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + fail(); + } + + @Override + public void onFailure(String source, Throwable t) { + latch.countDown(); + } + }); + clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(3).nanos(); + return ClusterState.builder(currentState).incrementVersion().build(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { +
fail(); + } + }); + // Additional update task to make sure all previous logging made it to the logger + // We don't check logging for this one since there is no guarantee that it will occur before our check + clusterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + latch.await(); + } finally { + rootLogger.removeAppender(mockAppender); + } + mockAppender.assertAllExpectationsMatched(); + }
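The durations asserted in the WARN-level test that follows straddle the slow task warn threshold: assuming the default threshold of 30s, test1 at 1s must stay quiet while test2 through test4 at 32s to 34s must each produce a warning. As a sketch only (the setting key below is an assumption about the cluster service defaults, not something introduced by this change), a test that needed shorter tasks to trigger the warning could presumably lower the threshold:

// Sketch: lower the slow-task warn threshold so shorter tasks log at WARN.
// The setting key and its 30s default are assumptions, not part of this change.
Settings lowThreshold = Settings.builder()
    .put("cluster.service.slow_task_logging_threshold", "5s")
    .build();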
+ + @TestLogging("cluster:WARN") // To ensure that we log cluster state events on WARN level + public void testLongClusterStateUpdateLogging() throws Exception { + MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation("test1 shouldn't see because setting is too low", + "cluster.service", Level.WARN, "*cluster state update task [test1] took [*] above the warn threshold of *")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.WARN, + "*cluster state update task [test2] took [32s] above the warn threshold of *")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.WARN, + "*cluster state update task [test3] took [33s] above the warn threshold of *")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN, + "*cluster state update task [test4] took [34s] above the warn threshold of *")); + + Logger rootLogger = Logger.getRootLogger(); + rootLogger.addAppender(mockAppender); + try { + final CountDownLatch latch = new CountDownLatch(5); + final CountDownLatch processedFirstTask = new CountDownLatch(1); + clusterService.currentTimeOverride = System.nanoTime(); + clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos(); + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + processedFirstTask.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + + processedFirstTask.await(); + clusterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(32).nanos(); + throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + fail(); + } + + @Override + public void onFailure(String source, Throwable t) { + latch.countDown(); + } + }); + clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(33).nanos(); + return ClusterState.builder(currentState).incrementVersion().build(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + clusterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(34).nanos(); + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + // Additional update task to make sure all previous logging made it to the logger + // We don't check logging for this one since there is no guarantee that it will occur before our check + clusterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + latch.await(); + } finally { + rootLogger.removeAppender(mockAppender); + } + mockAppender.assertAllExpectationsMatched(); + } + + private static class BlockingTask extends ClusterStateUpdateTask { + private final CountDownLatch latch = new CountDownLatch(1); + + public BlockingTask(Priority priority) { + super(priority); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + latch.await(); + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + } + + public void release() { + latch.countDown(); + } + + } + + private static class PrioritizedTask extends ClusterStateUpdateTask { + + private final CountDownLatch latch; + private final List<PrioritizedTask> tasks; + + private PrioritizedTask(Priority priority, CountDownLatch latch, List<PrioritizedTask> tasks) { + super(priority); + this.latch = latch; + this.tasks = tasks; + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + tasks.add(this); + latch.countDown(); + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + latch.countDown(); + } + } + + static class TimedClusterService extends ClusterService { + + public volatile Long currentTimeOverride = null; + + public TimedClusterService(Settings settings, OperationRouting operationRouting, ClusterSettings clusterSettings, + ThreadPool threadPool, ClusterName clusterName) { + super(settings, operationRouting, clusterSettings, threadPool, clusterName); + } + + @Override + protected long currentTimeInNanos() { + if (currentTimeOverride != null) { + return currentTimeOverride; + } + return super.currentTimeInNanos(); + } + } +}
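TimedClusterService is what makes the duration assertions above deterministic: ClusterService reads time through the overridable currentTimeInNanos() hook, and the test subclass substitutes a manually advanced value. The same pattern, reduced to a standalone sketch (class names invented for illustration):

// Illustrative sketch of the overridable-clock pattern used by TimedClusterService.
class TimedComponent {
    // production code funnels all time reads through one overridable method
    protected long currentTimeInNanos() {
        return System.nanoTime();
    }
}

class FakeClockComponent extends TimedComponent {
    volatile Long currentTimeOverride = null; // null means: fall back to the real clock

    @Override
    protected long currentTimeInNanos() {
        return currentTimeOverride != null ? currentTimeOverride : super.currentTimeInNanos();
    }
}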
diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceUtils.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceUtils.java new file mode 100644 index 00000000000..28b921c82da --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceUtils.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.service; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.NodeConnectionsService; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.concurrent.CountDownLatch; + +import static junit.framework.TestCase.fail; + +public class ClusterServiceUtils { + + public static ClusterService createClusterService(ThreadPool threadPool) { + ClusterService clusterService = new ClusterService(Settings.EMPTY, null, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool, new ClusterName("ClusterServiceTests")); + clusterService.setLocalNode(new DiscoveryNode("node", DummyTransportAddress.INSTANCE, Version.CURRENT)); + clusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { + @Override + public void connectToAddedNodes(ClusterChangedEvent event) { + // skip + } + + @Override + public void disconnectFromRemovedNodes(ClusterChangedEvent event) { + // skip + } + }); + clusterService.setClusterStatePublisher((event, ackListener) -> { + }); + clusterService.start(); + return clusterService; + } + + public static ClusterService createClusterService(ClusterState initialState, ThreadPool threadPool) { + ClusterService clusterService = createClusterService(threadPool); + setState(clusterService, initialState); + return clusterService; + } + + public static void setState(ClusterService clusterService, ClusterState.Builder clusterStateBuilder) { + setState(clusterService, clusterStateBuilder.build()); + } + + public static void setState(ClusterService clusterService, ClusterState clusterState) { + CountDownLatch latch = new CountDownLatch(1); + clusterService.submitStateUpdateTask("test setting state", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + // make sure we increment versions as listener may depend on it for change + return ClusterState.builder(clusterState).version(currentState.version() + 1).build(); + } + + @Override + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail("unexpected exception: " + t); + } + }); + try { + latch.await(); + } catch (InterruptedException e) { + throw new ElasticsearchException("unexpected interruption", e); + } + } +}
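For orientation, a hypothetical consumer of the new utility class could look like the following; the test class and assertion are invented, but the helper calls match the signatures above:

// Hypothetical usage of ClusterServiceUtils; the test name and assertions are
// illustrative only.
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;

import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService;
import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState;

public class ExampleClusterStateConsumerTests extends ESTestCase {
    public void testSeesUpdatedState() throws Exception {
        ThreadPool threadPool = new ThreadPool("ExampleClusterStateConsumerTests");
        ClusterService clusterService = createClusterService(threadPool);
        try {
            long versionBefore = clusterService.state().version();
            // setState submits an update task and blocks until it has been applied;
            // it always bumps the version, which is what we assert here
            setState(clusterService, ClusterState.builder(clusterService.state()));
            assertTrue(clusterService.state().version() > versionBefore);
        } finally {
            clusterService.close();
            threadPool.shutdownNow();
        }
    }
}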
diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringIT.java b/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringIT.java index ed03c918c31..e61bbc5f719 100644 --- a/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringIT.java @@ -22,19 +22,15 @@ package org.elasticsearch.cluster.settings; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import java.util.Collection; -import java.util.Collections; import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -50,6 +46,11 @@ public class SettingsFilteringIT extends ESIntegTestCase { } public static class SettingsFilteringPlugin extends Plugin { + public static final Setting<Boolean> SOME_NODE_SETTING = + Setting.boolSetting("some.node.setting", false, Property.NodeScope, Property.Filtered); + public static final Setting<Boolean> SOME_OTHER_NODE_SETTING = + Setting.boolSetting("some.other.node.setting", false, Property.NodeScope); + + /** + * The name of the plugin.
*/ @@ -72,10 +73,9 @@ public class SettingsFilteringIT extends ESIntegTestCase { } public void onModule(SettingsModule module) { - module.registerSetting(Setting.groupSetting("index.filter_test.", false, Setting.Scope.INDEX)); - module.registerSetting(Setting.boolSetting("some.node.setting", false, false, Setting.Scope.CLUSTER)); - module.registerSetting(Setting.boolSetting("some.other.node.setting", false, false, Setting.Scope.CLUSTER)); - module.registerSettingsFilter("some.node.setting"); + module.registerSetting(SOME_NODE_SETTING); + module.registerSetting(SOME_OTHER_NODE_SETTING); + module.registerSetting(Setting.groupSetting("index.filter_test.", Property.IndexScope)); module.registerSettingsFilter("index.filter_test.foo"); module.registerSettingsFilter("index.filter_test.bar*"); } @@ -104,8 +104,8 @@ public class SettingsFilteringIT extends ESIntegTestCase { for(NodeInfo info : nodeInfos.getNodes()) { Settings settings = info.getSettings(); assertNotNull(settings); - assertNull(settings.get("some.node.setting")); - assertTrue(settings.getAsBoolean("some.other.node.setting", false)); + assertNull(settings.get(SettingsFilteringPlugin.SOME_NODE_SETTING.getKey())); + assertTrue(settings.getAsBoolean(SettingsFilteringPlugin.SOME_OTHER_NODE_SETTING.getKey(), false)); assertEquals(settings.get("node.name"), info.getNode().getName()); } }
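Distilled from the migration above: the new style declares a Setting constant carrying Property flags, where Property.Filtered hides the value from the settings returned over the nodes info API, replacing the separate registerSettingsFilter call for that key. A minimal plugin using the new style might look like this (plugin and setting names invented):

// Sketch of a plugin registering a filtered node-scoped setting; names are
// illustrative, but the calls mirror SettingsFilteringPlugin above.
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.plugins.Plugin;

public class ExamplePlugin extends Plugin {
    // Filtered: readable by the node, but stripped from API responses
    public static final Setting<Boolean> HIDDEN_SETTING =
        Setting.boolSetting("example.hidden.setting", false, Property.NodeScope, Property.Filtered);

    @Override
    public String name() {
        return "example-plugin";
    }

    @Override
    public String description() {
        return "registers a filtered node setting";
    }

    public void onModule(SettingsModule module) {
        module.registerSetting(HIDDEN_SETTING);
    }
}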
diff --git a/core/src/test/java/org/elasticsearch/common/ParseFieldTests.java b/core/src/test/java/org/elasticsearch/common/ParseFieldTests.java index f4b8747ccdc..3770cd25c10 100644 --- a/core/src/test/java/org/elasticsearch/common/ParseFieldTests.java +++ b/core/src/test/java/org/elasticsearch/common/ParseFieldTests.java @@ -20,8 +20,7 @@ package org.elasticsearch.common; import org.elasticsearch.test.ESTestCase; -import java.util.EnumSet; - +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.sameInstance; @@ -33,38 +32,29 @@ public class ParseFieldTests extends ESTestCase { String[] deprecated = new String[]{"barFoo", "bar_foo"}; ParseField withDeprecations = field.withDeprecation("Foobar", randomFrom(deprecated)); assertThat(field, not(sameInstance(withDeprecations))); - assertThat(field.match(randomFrom(values), ParseField.EMPTY_FLAGS), is(true)); - assertThat(field.match("foo bar", ParseField.EMPTY_FLAGS), is(false)); - assertThat(field.match(randomFrom(deprecated), ParseField.EMPTY_FLAGS), is(false)); - assertThat(field.match("barFoo", ParseField.EMPTY_FLAGS), is(false)); + assertThat(field.match(randomFrom(values), false), is(true)); + assertThat(field.match("foo bar", false), is(false)); + assertThat(field.match(randomFrom(deprecated), false), is(false)); + assertThat(field.match("barFoo", false), is(false)); - assertThat(withDeprecations.match(randomFrom(values), ParseField.EMPTY_FLAGS), is(true)); - assertThat(withDeprecations.match("foo bar", ParseField.EMPTY_FLAGS), is(false)); - assertThat(withDeprecations.match(randomFrom(deprecated), ParseField.EMPTY_FLAGS), is(true)); - assertThat(withDeprecations.match("barFoo", ParseField.EMPTY_FLAGS), is(true)); + assertThat(withDeprecations.match(randomFrom(values), false), is(true)); + assertThat(withDeprecations.match("foo bar", false), is(false)); + assertThat(withDeprecations.match(randomFrom(deprecated), false), is(true)); + assertThat(withDeprecations.match("barFoo", false), is(true)); // now with strict mode - EnumSet<ParseField.Flag> flags = EnumSet.of(ParseField.Flag.STRICT); - assertThat(field.match(randomFrom(values), flags), is(true)); - assertThat(field.match("foo bar", flags), is(false)); - assertThat(field.match(randomFrom(deprecated), flags), is(false)); - assertThat(field.match("barFoo", flags), is(false)); + assertThat(field.match(randomFrom(values), true), is(true)); + assertThat(field.match("foo bar", true), is(false)); + assertThat(field.match(randomFrom(deprecated), true), is(false)); + assertThat(field.match("barFoo", true), is(false)); - assertThat(withDeprecations.match(randomFrom(values), flags), is(true)); - assertThat(withDeprecations.match("foo bar", flags), is(false)); - try { - withDeprecations.match(randomFrom(deprecated), flags); - fail(); - } catch (IllegalArgumentException ex) { - - } - - try { - withDeprecations.match("barFoo", flags); - fail(); - } catch (IllegalArgumentException ex) { - - } + assertThat(withDeprecations.match(randomFrom(values), true), is(true)); + assertThat(withDeprecations.match("foo bar", true), is(false)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> withDeprecations.match(randomFrom(deprecated), true)); + assertThat(e.getMessage(), containsString("used, expected [foo_bar] instead")); + e = expectThrows(IllegalArgumentException.class, () -> withDeprecations.match("barFoo", true)); + assertThat(e.getMessage(), containsString("Deprecated field [barFoo] used, expected [foo_bar] instead")); } public void testAllDeprecated() { @@ -72,30 +62,29 @@ boolean withDeprecatedNames = randomBoolean(); String[] deprecated = new String[]{"text", "same_as_text"}; - String[] allValues = values; + String[] allValues; if (withDeprecatedNames) { - String[] newArray = new String[allValues.length + deprecated.length]; - System.arraycopy(allValues, 0, newArray, 0, allValues.length); - System.arraycopy(deprecated, 0, newArray, allValues.length, deprecated.length); + String[] newArray = new String[values.length + deprecated.length]; + System.arraycopy(values, 0, newArray, 0, values.length); + System.arraycopy(deprecated, 0, newArray, values.length, deprecated.length); allValues = newArray; + } else { + allValues = values; } - ParseField field = new ParseField(randomFrom(values)); + ParseField field; if (withDeprecatedNames) { - field = field.withDeprecation(deprecated); + field = new ParseField(randomFrom(values)).withDeprecation(deprecated).withAllDeprecated("like"); + } else { + field = new ParseField(randomFrom(values)).withAllDeprecated("like"); } - field = field.withAllDeprecated("like"); // strict mode off - assertThat(field.match(randomFrom(allValues), ParseField.EMPTY_FLAGS), is(true)); - assertThat(field.match("not a field name", ParseField.EMPTY_FLAGS), is(false)); + assertThat(field.match(randomFrom(allValues), false), is(true)); + assertThat(field.match("not a field name", false), is(false)); // now with strict mode - EnumSet<ParseField.Flag> flags = EnumSet.of(ParseField.Flag.STRICT); - try { - field.match(randomFrom(allValues), flags); - fail(); - } catch (IllegalArgumentException ex) { - } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> field.match(randomFrom(allValues), true)); + assertThat(e.getMessage(), containsString(" used, replaced by [like]")); } } diff --git a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java index 921c66f7acb..3b88a3bdcfe 100644 --- a/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java +++ 
b/core/src/test/java/org/elasticsearch/common/cache/CacheTests.java @@ -55,7 +55,7 @@ public class CacheTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); numberOfEntries = randomIntBetween(1000, 10000); - logger.debug("numberOfEntries: " + numberOfEntries); + logger.debug("numberOfEntries: {}", numberOfEntries); } // cache some entries, then randomly lookup keys that do not exist, then check the stats diff --git a/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java b/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java index 6e4d3867fde..566d2148cae 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java @@ -19,13 +19,13 @@ package org.elasticsearch.common.geo; -import com.spatial4j.core.exception.InvalidShapeException; -import com.spatial4j.core.shape.Circle; -import com.spatial4j.core.shape.Rectangle; -import com.spatial4j.core.shape.Shape; -import com.spatial4j.core.shape.ShapeCollection; -import com.spatial4j.core.shape.jts.JtsGeometry; -import com.spatial4j.core.shape.jts.JtsPoint; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Circle; +import org.locationtech.spatial4j.shape.Rectangle; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.ShapeCollection; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; +import org.locationtech.spatial4j.shape.jts.JtsPoint; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.GeometryFactory; diff --git a/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java index 06fadffc806..abbd6ce40aa 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java @@ -19,12 +19,12 @@ package org.elasticsearch.common.geo; -import com.spatial4j.core.exception.InvalidShapeException; -import com.spatial4j.core.shape.Circle; -import com.spatial4j.core.shape.Point; -import com.spatial4j.core.shape.Rectangle; -import com.spatial4j.core.shape.Shape; -import com.spatial4j.core.shape.impl.PointImpl; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Circle; +import org.locationtech.spatial4j.shape.Point; +import org.locationtech.spatial4j.shape.Rectangle; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.impl.PointImpl; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.LineString; import com.vividsolutions.jts.geom.Polygon; diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java index 305e57fbaf1..881db868ef9 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.spatial4j.core.shape.Rectangle; +import org.locationtech.spatial4j.shape.Rectangle; import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; diff --git 
a/core/src/test/java/org/elasticsearch/common/io/stream/AbstractWriteableEnumTestCase.java b/core/src/test/java/org/elasticsearch/common/io/stream/AbstractWriteableEnumTestCase.java new file mode 100644 index 00000000000..b8be6fb1493 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/io/stream/AbstractWriteableEnumTestCase.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.io.stream; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Abstract class offering base functionality for testing {@link Writeable} enums. + */ +public abstract class AbstractWriteableEnumTestCase extends ESTestCase { + + /** + * Test that the ordinals for the enum are consistent (i.e. the order hasn't changed) + * because writing an enum to a stream often uses the ordinal value. + */ + public abstract void testValidOrdinals(); + + /** + * Test that the conversion from a string to enum is correct. + */ + public abstract void testFromString(); + + /** + * Test that the correct enum value is produced from the serialized value in the {@link StreamInput}. + */ + public abstract void testReadFrom() throws IOException; + + /** + * Test that the correct serialized value is produced from the {@link StreamOutput}.
+ */ + public abstract void testWriteTo() throws IOException; + + // a convenience method for testing the write of a writeable enum + protected static void assertWriteToStream(final Writeable writeableEnum, final int ordinal) throws IOException { + try (BytesStreamOutput out = new BytesStreamOutput()) { + writeableEnum.writeTo(out); + try (StreamInput in = StreamInput.wrap(out.bytes())) { + assertThat(in.readVInt(), equalTo(ordinal)); + } + } + } + + // a convenience method for testing the read of a writeable enum + protected static <T extends Writeable<T>> void assertReadFromStream(final int ordinal, final Writeable<T> expected) throws IOException { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(ordinal); + try (StreamInput in = StreamInput.wrap(out.bytes())) { + assertThat(expected.readFrom(in), equalTo(expected)); + } + } + } + +}
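A hypothetical concrete subclass shows how these helpers are meant to be used; ExampleEnum, its values, and its fromString helper are invented for illustration and are not part of this change:

// Hypothetical subclass of AbstractWriteableEnumTestCase. Assumes an invented
// Writeable enum ExampleEnum with values ALPHA (ordinal 0) and BETA (ordinal 1)
// and a fromString helper.
import java.io.IOException;

import static org.hamcrest.Matchers.equalTo;

public class ExampleEnumTests extends AbstractWriteableEnumTestCase {
    @Override
    public void testValidOrdinals() {
        assertThat(ExampleEnum.ALPHA.ordinal(), equalTo(0));
        assertThat(ExampleEnum.BETA.ordinal(), equalTo(1));
    }

    @Override
    public void testFromString() {
        assertThat(ExampleEnum.fromString("alpha"), equalTo(ExampleEnum.ALPHA));
    }

    @Override
    public void testReadFrom() throws IOException {
        assertReadFromStream(0, ExampleEnum.ALPHA);
        assertReadFromStream(1, ExampleEnum.BETA);
    }

    @Override
    public void testWriteTo() throws IOException {
        assertWriteToStream(ExampleEnum.ALPHA, 0);
        assertWriteToStream(ExampleEnum.BETA, 1);
    }
}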
diff --git a/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java b/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java index a6dda573304..5c812cca0a7 100644 --- a/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java +++ b/core/src/test/java/org/elasticsearch/common/logging/LoggingConfigurationTests.java @@ -19,21 +19,21 @@ package org.elasticsearch.common.logging; -import org.apache.log4j.Appender; -import org.apache.log4j.Logger; -import org.elasticsearch.common.cli.CliToolTestCase; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.internal.InternalSettingsPreparer; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.Arrays; +import org.apache.log4j.Appender; +import org.apache.log4j.Logger; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.node.internal.InternalSettingsPreparer; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -162,7 +162,7 @@ public class LoggingConfigurationTests extends ESTestCase { .put("appender.console.type", "console") .put("appender.console.layout.type", "consolePattern") .put("appender.console.layout.conversionPattern", "[%d{ISO8601}][%-5p][%-25c] %m%n") - .build(), new CliToolTestCase.MockTerminal()); + .build(), new MockTerminal()); LogConfigurator.configure(environment.settings(), true); // args should overwrite whatever is in the config ESLogger esLogger = ESLoggerFactory.getLogger("test_resolve_order"); @@ -187,7 +187,7 @@ public class LoggingConfigurationTests extends ESTestCase { Settings.builder() .put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(), new CliToolTestCase.MockTerminal()); + .build(), new MockTerminal()); LogConfigurator.configure(environment.settings(), false); ESLogger esLogger = ESLoggerFactory.getLogger("test_config_not_read"); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java b/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java deleted file mode 100644 index a4a5972e45b..00000000000 --- a/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene; - -import org.apache.lucene.document.Document; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.MultiReader; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.Explanation; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.LRUQueryCache; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; -import org.apache.lucene.search.QueryUtils; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Weight; -import org.apache.lucene.store.Directory; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.Set; - -public class IndexCacheableQueryTests extends ESTestCase { - - static class DummyIndexCacheableQuery extends IndexCacheableQuery { - @Override - public String toString(String field) { - return "DummyIndexCacheableQuery"; - } - - @Override - public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { - return new Weight(this) { - - @Override - public void extractTerms(Set<Term> terms) { - throw new UnsupportedOperationException(); - } - - @Override - public Explanation explain(LeafReaderContext context, int doc) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public float getValueForNormalization() throws IOException { - return 0; - } - - @Override - public void normalize(float norm, float topLevelBoost) { - } - - @Override - public Scorer scorer(LeafReaderContext context) throws IOException { - return null; - } - - }; - } - } - - public void testBasics() throws IOException { - DummyIndexCacheableQuery query = new DummyIndexCacheableQuery(); - QueryUtils.check(query); - - Query rewritten = query.rewrite(new MultiReader(new IndexReader[0])); - QueryUtils.check(rewritten); - QueryUtils.checkUnequal(query, rewritten); - - Query rewritten2 = query.rewrite(new MultiReader(new IndexReader[0])); - QueryUtils.check(rewritten2); - QueryUtils.checkUnequal(rewritten, rewritten2); - } - - public void testCache() throws IOException { - Directory dir = newDirectory(); - LRUQueryCache cache = new LRUQueryCache(10000, Long.MAX_VALUE); - QueryCachingPolicy policy = QueryCachingPolicy.ALWAYS_CACHE; - RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); - for (int i = 0; i < 10; ++i) { - writer.addDocument(new Document()); - } - - IndexReader reader = writer.getReader(); - IndexSearcher searcher = newSearcher(reader);
- reader = searcher.getIndexReader(); // reader might be wrapped - searcher.setQueryCache(cache); - searcher.setQueryCachingPolicy(policy); - - assertEquals(0, cache.getCacheSize()); - DummyIndexCacheableQuery query = new DummyIndexCacheableQuery(); - searcher.count(query); - int expectedCacheSize = reader.leaves().size(); - assertEquals(expectedCacheSize, cache.getCacheSize()); - searcher.count(query); - assertEquals(expectedCacheSize, cache.getCacheSize()); - - writer.addDocument(new Document()); - - IndexReader reader2 = writer.getReader(); - searcher = newSearcher(reader2); - reader2 = searcher.getIndexReader(); // reader might be wrapped - searcher.setQueryCache(cache); - searcher.setQueryCachingPolicy(policy); - - // since the query is only cacheable at the index level, it has to be recomputed on all leaves - expectedCacheSize += reader2.leaves().size(); - searcher.count(query); - assertEquals(expectedCacheSize, cache.getCacheSize()); - searcher.count(query); - assertEquals(expectedCacheSize, cache.getCacheSize()); - - reader.close(); - reader2.close(); - writer.close(); - assertEquals(0, cache.getCacheSize()); - dir.close(); - } - -} diff --git a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index 484b88f096f..8df6f5c78cc 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -84,7 +84,6 @@ public class LuceneTests extends ESTestCase { // now shadow engine should try to be created latch.countDown(); - dir.setEnableVirusScanner(false); IndexWriterConfig iwc = newIndexWriterConfig(); iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); iwc.setMergePolicy(NoMergePolicy.INSTANCE); @@ -104,7 +103,6 @@ public class LuceneTests extends ESTestCase { public void testCleanIndex() throws IOException { MockDirectoryWrapper dir = newMockDirectory(); - dir.setEnableVirusScanner(false); IndexWriterConfig iwc = newIndexWriterConfig(); iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); iwc.setMergePolicy(NoMergePolicy.INSTANCE); @@ -130,7 +128,7 @@ public class LuceneTests extends ESTestCase { writer.deleteDocuments(new Term("id", "2")); writer.commit(); - try (DirectoryReader open = DirectoryReader.open(writer, true)) { + try (DirectoryReader open = DirectoryReader.open(writer)) { assertEquals(3, open.numDocs()); assertEquals(1, open.numDeletedDocs()); assertEquals(4, open.maxDoc()); @@ -158,7 +156,6 @@ public class LuceneTests extends ESTestCase { public void testPruneUnreferencedFiles() throws IOException { MockDirectoryWrapper dir = newMockDirectory(); - dir.setEnableVirusScanner(false); IndexWriterConfig iwc = newIndexWriterConfig(); iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); iwc.setMergePolicy(NoMergePolicy.INSTANCE); @@ -186,7 +183,7 @@ public class LuceneTests extends ESTestCase { writer.deleteDocuments(new Term("id", "2")); writer.commit(); - DirectoryReader open = DirectoryReader.open(writer, true); + DirectoryReader open = DirectoryReader.open(writer); assertEquals(3, open.numDocs()); assertEquals(1, open.numDeletedDocs()); assertEquals(4, open.maxDoc()); @@ -215,7 +212,6 @@ public class LuceneTests extends ESTestCase { public void testFiles() throws IOException { MockDirectoryWrapper dir = newMockDirectory(); - dir.setEnableVirusScanner(false); IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random())); iwc.setMergePolicy(NoMergePolicy.INSTANCE); 
iwc.setMaxBufferedDocs(2); @@ -279,7 +275,6 @@ public class LuceneTests extends ESTestCase { public void testNumDocs() throws IOException { MockDirectoryWrapper dir = newMockDirectory(); - dir.setEnableVirusScanner(false); IndexWriterConfig iwc = newIndexWriterConfig(); IndexWriter writer = new IndexWriter(dir, iwc); Document doc = new Document(); @@ -369,6 +364,6 @@ public class LuceneTests extends ESTestCase { */ public void testMMapHackSupported() throws Exception { // add assume's here if needed for certain platforms, but we should know if it does not work. - assertTrue(MMapDirectory.UNMAP_SUPPORTED); + assertTrue("MMapDirectory does not support unmapping: " + MMapDirectory.UNMAP_NOT_SUPPORTED_REASON, MMapDirectory.UNMAP_SUPPORTED); } } diff --git a/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java index 9e7f54b8323..f2dc53e44cd 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java @@ -152,7 +152,7 @@ public class SimpleAllTests extends ESTestCase { indexWriter.addDocument(doc); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); IndexSearcher searcher = new IndexSearcher(reader); Query query = new AllTermQuery(new Term("_all", "else")); @@ -198,7 +198,7 @@ public class SimpleAllTests extends ESTestCase { indexWriter.addDocument(doc); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); IndexSearcher searcher = new IndexSearcher(reader); // this one is boosted. so the second doc is more relevant @@ -244,7 +244,7 @@ public class SimpleAllTests extends ESTestCase { indexWriter.addDocument(doc); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); assertEquals(2, reader.leaves().size()); IndexSearcher searcher = new IndexSearcher(reader); @@ -280,7 +280,7 @@ public class SimpleAllTests extends ESTestCase { indexWriter.addDocument(doc); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); IndexSearcher searcher = new IndexSearcher(reader); TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10); @@ -330,7 +330,7 @@ public class SimpleAllTests extends ESTestCase { indexWriter.addDocument(doc); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); IndexSearcher searcher = new IndexSearcher(reader); TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10); @@ -368,7 +368,7 @@ public class SimpleAllTests extends ESTestCase { indexWriter.addDocument(doc); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); IndexSearcher searcher = new IndexSearcher(reader); TopDocs docs = searcher.search(new MatchAllDocsQuery(), 10); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/index/ESDirectoryReaderTests.java b/core/src/test/java/org/elasticsearch/common/lucene/index/ESDirectoryReaderTests.java index 7fb3ec0c2e9..817dabfece3 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/index/ESDirectoryReaderTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/index/ESDirectoryReaderTests.java @@ -55,7 +55,7 
@@ public class ESDirectoryReaderTests extends ESTestCase { // open reader ShardId shardId = new ShardId("fake", "_na_", 1); - DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw, true), shardId); + DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw), shardId); assertEquals(2, ir.numDocs()); assertEquals(1, ir.leaves().size()); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java index 51d2ba77ec5..3d1b0fdb842 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java @@ -137,7 +137,7 @@ public class FreqTermsEnumTests extends ESTestCase { } // now go over each doc, build the relevant references and filter - reader = DirectoryReader.open(iw, true); + reader = DirectoryReader.open(iw); List<Term> filterTerms = new ArrayList<>(); for (int docId = 0; docId < reader.maxDoc(); docId++) { Document doc = reader.document(docId); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java b/core/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java index 9098289847e..23b6939fe7a 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java @@ -27,15 +27,12 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; -import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; import org.apache.lucene.store.RAMDirectory; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; public class MultiPhrasePrefixQueryTests extends ESTestCase { public void testSimple() throws Exception { @@ -43,7 +40,7 @@ public class MultiPhrasePrefixQueryTests extends ESTestCase { Document doc = new Document(); doc.add(new Field("field", "aaa bbb ccc ddd", TextField.TYPE_NOT_STORED)); writer.addDocument(doc); - IndexReader reader = DirectoryReader.open(writer, true); + IndexReader reader = DirectoryReader.open(writer); IndexSearcher searcher = new IndexSearcher(reader); MultiPhrasePrefixQuery query = new MultiPhrasePrefixQuery(); @@ -66,22 +63,4 @@ public class MultiPhrasePrefixQueryTests extends ESTestCase { query.add(new Term("field", "xxx")); assertThat(searcher.count(query), equalTo(0)); } - - public void testBoost() throws Exception { - IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); - Document doc = new Document(); - doc.add(new Field("field", "aaa bbb", TextField.TYPE_NOT_STORED)); - writer.addDocument(doc); - doc = new Document(); - doc.add(new Field("field", "ccc ddd", TextField.TYPE_NOT_STORED)); - writer.addDocument(doc); - IndexReader reader = DirectoryReader.open(writer, true); - MultiPhrasePrefixQuery multiPhrasePrefixQuery = new MultiPhrasePrefixQuery(); - multiPhrasePrefixQuery.add(new Term[]{new Term("field", "aaa"), new Term("field", "bb")}); - multiPhrasePrefixQuery.setBoost(randomFloat()); - Query query =
multiPhrasePrefixQuery.rewrite(reader); - assertThat(query, instanceOf(BoostQuery.class)); - BoostQuery boostQuery = (BoostQuery) query; - assertThat(boostQuery.getBoost(), equalTo(multiPhrasePrefixQuery.getBoost())); - } } \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java b/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java index b0e2ea873c4..0dcce74c1d2 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java @@ -54,7 +54,7 @@ public class MoreLikeThisQueryTests extends ESTestCase { document.add(new TextField("text", "lucene release", Field.Store.YES)); indexWriter.addDocument(document); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); IndexSearcher searcher = new IndexSearcher(reader); MoreLikeThisQuery mltQuery = new MoreLikeThisQuery("lucene", new String[]{"text"}, Lucene.STANDARD_ANALYZER); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionLookupTests.java b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionLookupTests.java index 1340d11616c..72b6b2b5eec 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionLookupTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionLookupTests.java @@ -51,7 +51,7 @@ public class VersionLookupTests extends ESTestCase { doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE)); doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87)); writer.addDocument(doc); - DirectoryReader reader = DirectoryReader.open(writer, false); + DirectoryReader reader = DirectoryReader.open(writer); LeafReaderContext segment = reader.leaves().get(0); PerThreadIDAndVersionLookup lookup = new PerThreadIDAndVersionLookup(segment.reader()); // found doc @@ -79,7 +79,7 @@ public class VersionLookupTests extends ESTestCase { doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87)); writer.addDocument(doc); writer.addDocument(doc); - DirectoryReader reader = DirectoryReader.open(writer, false); + DirectoryReader reader = DirectoryReader.open(writer); LeafReaderContext segment = reader.leaves().get(0); PerThreadIDAndVersionLookup lookup = new PerThreadIDAndVersionLookup(segment.reader()); // return the last doc when there are duplicates diff --git a/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java index 1221445e8a6..7f405ea0531 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java @@ -78,7 +78,7 @@ public class VersionsTests extends ESTestCase { public void testVersions() throws Exception { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); - DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "_na_", 1)); + DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), 
equalTo(Versions.NOT_FOUND)); Document doc = new Document(); @@ -145,7 +145,7 @@ public class VersionsTests extends ESTestCase { docs.add(doc); writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs); - DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "_na_", 1)); + DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(5L)); assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(5L)); @@ -170,7 +170,7 @@ public class VersionsTests extends ESTestCase { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); - DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "_na_", 1)); + DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND)); Document doc = new Document(); @@ -305,7 +305,7 @@ public class VersionsTests extends ESTestCase { doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE)); doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87)); writer.addDocument(doc); - DirectoryReader reader = DirectoryReader.open(writer, false); + DirectoryReader reader = DirectoryReader.open(writer); // should increase cache size by 1 assertEquals(87, Versions.loadVersion(reader, new Term(UidFieldMapper.NAME, "6"))); assertEquals(size+1, Versions.lookupStates.size()); @@ -330,7 +330,7 @@ public class VersionsTests extends ESTestCase { doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE)); doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87)); writer.addDocument(doc); - DirectoryReader reader = DirectoryReader.open(writer, false); + DirectoryReader reader = DirectoryReader.open(writer); assertEquals(87, Versions.loadVersion(reader, new Term(UidFieldMapper.NAME, "6"))); assertEquals(size+1, Versions.lookupStates.size()); // now wrap the reader diff --git a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java index 4090daa23a0..bd794f96da3 100644 --- a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java +++ b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java @@ -19,12 +19,18 @@ package org.elasticsearch.common.network; +import org.elasticsearch.action.support.replication.ReplicationTask; import org.elasticsearch.client.Client; import org.elasticsearch.common.Table; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.ModuleTestCase; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.http.HttpInfo; import 
org.elasticsearch.http.HttpServerAdapter; import org.elasticsearch.http.HttpServerTransport; @@ -36,10 +42,16 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.cat.AbstractCatAction; import org.elasticsearch.rest.action.cat.RestNodesAction; import org.elasticsearch.rest.action.main.RestMainAction; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.Task.Status; import org.elasticsearch.test.transport.AssertingLocalTransport; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + +import static org.hamcrest.Matchers.sameInstance; + public class NetworkModuleTests extends ModuleTestCase { static class FakeTransportService extends TransportService { @@ -104,36 +116,36 @@ public class NetworkModuleTests extends ModuleTestCase { public void testRegisterTransportService() { Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "custom").build(); - NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false, null); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false, new NamedWriteableRegistry()); module.registerTransportService("custom", FakeTransportService.class); assertBinding(module, TransportService.class, FakeTransportService.class); // check it works with transport only as well - module = new NetworkModule(new NetworkService(settings), settings, true, null); + module = new NetworkModule(new NetworkService(settings), settings, true, new NamedWriteableRegistry()); module.registerTransportService("custom", FakeTransportService.class); assertBinding(module, TransportService.class, FakeTransportService.class); } public void testRegisterTransport() { Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom").build(); - NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false, null); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false, new NamedWriteableRegistry()); module.registerTransport("custom", FakeTransport.class); assertBinding(module, Transport.class, FakeTransport.class); // check it works with transport only as well - module = new NetworkModule(new NetworkService(settings), settings, true, null); + module = new NetworkModule(new NetworkService(settings), settings, true, new NamedWriteableRegistry()); module.registerTransport("custom", FakeTransport.class); assertBinding(module, Transport.class, FakeTransport.class); } public void testRegisterHttpTransport() { Settings settings = Settings.builder().put(NetworkModule.HTTP_TYPE_SETTING.getKey(), "custom").build(); - NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false, null); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false, new NamedWriteableRegistry()); module.registerHttpTransport("custom", FakeHttpTransport.class); assertBinding(module, HttpServerTransport.class, FakeHttpTransport.class); // check registration not allowed for transport only - module = new NetworkModule(new NetworkService(settings), settings, true, null); + module = new NetworkModule(new NetworkService(settings), settings, true, new NamedWriteableRegistry()); try { module.registerHttpTransport("custom", FakeHttpTransport.class); fail(); @@ -144,19 +156,19 @@ public class NetworkModuleTests extends ModuleTestCase { // not added if http is disabled settings = 
Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false).build(); - module = new NetworkModule(new NetworkService(settings), settings, false, null); + module = new NetworkModule(new NetworkService(settings), settings, false, new NamedWriteableRegistry()); assertNotBound(module, HttpServerTransport.class); } public void testRegisterRestHandler() { Settings settings = Settings.EMPTY; - NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false, null); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false, new NamedWriteableRegistry()); module.registerRestHandler(FakeRestHandler.class); // also check a builtin is bound assertSetMultiBinding(module, RestHandler.class, FakeRestHandler.class, RestMainAction.class); // check registration not allowed for transport only - module = new NetworkModule(new NetworkService(settings), settings, true, null); + module = new NetworkModule(new NetworkService(settings), settings, true, new NamedWriteableRegistry()); try { module.registerRestHandler(FakeRestHandler.class); fail(); @@ -168,9 +180,44 @@ public class NetworkModuleTests extends ModuleTestCase { public void testRegisterCatRestHandler() { Settings settings = Settings.EMPTY; - NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false, null); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false, new NamedWriteableRegistry()); module.registerRestHandler(FakeCatRestHandler.class); // also check a builtin is bound assertSetMultiBinding(module, AbstractCatAction.class, FakeCatRestHandler.class, RestNodesAction.class); } + + public void testRegisterTaskStatus() { + NamedWriteableRegistry registry = new NamedWriteableRegistry(); + Settings settings = Settings.EMPTY; + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false, registry); + + // Builtin prototype comes back + assertNotNull(registry.getPrototype(Task.Status.class, ReplicationTask.Status.PROTOTYPE.getWriteableName())); + + Task.Status dummy = new DummyTaskStatus(); + module.registerTaskStatus(dummy); + assertThat(registry.getPrototype(Task.Status.class, "dummy"), sameInstance(dummy)); + } + + private class DummyTaskStatus implements Task.Status { + @Override + public String getWriteableName() { + return "dummy"; + } + + @Override + public Status readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + throw new UnsupportedOperationException(); + } + } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 6cc9912924d..4ca50245140 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.Setting.Property; import 
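The `null` fourth constructor argument becoming a real `NamedWriteableRegistry`, together with the new `registerTaskStatus` hook exercised by `testRegisterTaskStatus` above, is what lets custom `Task.Status` prototypes be looked up by name during deserialization. A self-contained sketch, modeled directly on the `DummyTaskStatus` in the test (`MyTaskStatus` and the `"my-status"` name are hypothetical):

```java
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.Task;

import java.io.IOException;

public class TaskStatusRegistrationSketch {
    /** Hypothetical status; mirrors the shape of DummyTaskStatus in the test above. */
    static class MyTaskStatus implements Task.Status {
        @Override
        public String getWriteableName() { return "my-status"; }

        @Override
        public Task.Status readFrom(StreamInput in) throws IOException {
            return new MyTaskStatus(); // nothing to deserialize in this sketch
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            // nothing to serialize in this sketch
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            return builder.startObject().endObject();
        }
    }

    public static void main(String[] args) {
        NamedWriteableRegistry registry = new NamedWriteableRegistry();
        Settings settings = Settings.EMPTY;
        NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false, registry);

        module.registerTaskStatus(new MyTaskStatus());
        // The prototype is now discoverable by its writeable name.
        Task.Status proto = registry.getPrototype(Task.Status.class, "my-status");
        System.out.println(proto.getWriteableName()); // my-status
    }
}
```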
org.elasticsearch.index.IndexModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportService; @@ -41,8 +42,8 @@ import java.util.function.Function; public class ScopedSettingsTests extends ESTestCase { public void testAddConsumer() { - Setting testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER); - Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.CLUSTER); + Setting testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); + Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope); AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, Collections.singleton(testSetting)); AtomicInteger consumer = new AtomicInteger(); @@ -69,8 +70,8 @@ public class ScopedSettingsTests extends ESTestCase { } public void testApply() { - Setting testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER); - Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.CLUSTER); + Setting testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); + Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope); AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(testSetting, testSetting2))); AtomicInteger consumer = new AtomicInteger(); @@ -139,7 +140,10 @@ public class ScopedSettingsTests extends ESTestCase { } public void testIsDynamic(){ - ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER), Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.CLUSTER)))); + ClusterSettings settings = + new ClusterSettings(Settings.EMPTY, + new HashSet<>(Arrays.asList(Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope), + Setting.intSetting("foo.bar.baz", 1, Property.NodeScope)))); assertFalse(settings.hasDynamicSetting("foo.bar.baz")); assertTrue(settings.hasDynamicSetting("foo.bar")); assertNotNull(settings.get("foo.bar.baz")); @@ -150,8 +154,8 @@ public class ScopedSettingsTests extends ESTestCase { } public void testDiff() throws IOException { - Setting foobarbaz = Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.CLUSTER); - Setting foobar = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER); + Setting foobarbaz = Setting.intSetting("foo.bar.baz", 1, Property.NodeScope); + Setting foobar = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(foobar, foobarbaz))); Settings diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.EMPTY); assertEquals(diff.getAsMap().size(), 1); @@ -216,6 +220,13 @@ public class ScopedSettingsTests extends ESTestCase { } catch (IllegalArgumentException e) { assertEquals("Failed to parse value [true] for setting [index.number_of_replicas]", e.getMessage()); } + + try { + settings.validate("index.similarity.classic.type", Settings.builder().put("index.similarity.classic.type", "mine").build()); + fail(); + } catch (IllegalArgumentException e) { + assertEquals("illegal value for [index.similarity.classic] cannot redefine built-in similarity", e.getMessage()); + } } @@ -240,22 +251,41 @@ public class ScopedSettingsTests extends ESTestCase { try { new IndexScopedSettings( - Settings.EMPTY, 
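The settings changes in this and the following hunks all follow one mechanical migration: the old `boolean dynamic` flag plus `Setting.Scope` enum pair is replaced by varargs `Setting.Property` values. A side-by-side sketch of the two styles (keys are illustrative):

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

public class SettingMigrationSketch {
    // Before: dynamism was a boolean flag and scope a dedicated enum:
    //   Setting<Integer> old = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER);

    // After: both are expressed as varargs properties on the same setting.
    static final Setting<Integer> FOO_BAR =
            Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope);

    // A static (non-dynamic) index-level setting simply omits Property.Dynamic.
    static final Setting<Integer> FOO_BAZ =
            Setting.intSetting("foo.baz", 1, Property.IndexScope);
}
```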
Collections.singleton(Setting.groupSetting("boo .", false, Setting.Scope.INDEX))); + Settings.EMPTY, Collections.singleton(Setting.groupSetting("foo.bar.", Property.IndexScope))); fail(); } catch (IllegalArgumentException e) { - assertEquals("illegal settings key: [boo .]", e.getMessage()); + assertEquals("illegal settings key: [foo.bar.] must start with [index.]", e.getMessage()); } - new IndexScopedSettings( - Settings.EMPTY, Collections.singleton(Setting.groupSetting("boo.", false, Setting.Scope.INDEX))); + try { new IndexScopedSettings( - Settings.EMPTY, Collections.singleton(Setting.boolSetting("boo.", true, false, Setting.Scope.INDEX))); + Settings.EMPTY, Collections.singleton(Setting.simpleString("foo.bar", Property.IndexScope))); fail(); } catch (IllegalArgumentException e) { - assertEquals("illegal settings key: [boo.]", e.getMessage()); + assertEquals("illegal settings key: [foo.bar] must start with [index.]", e.getMessage()); + } + + try { + new IndexScopedSettings( + Settings.EMPTY, Collections.singleton(Setting.groupSetting("index. foo.", Property.IndexScope))); + fail(); + } catch (IllegalArgumentException e) { + assertEquals("illegal settings key: [index. foo.]", e.getMessage()); } new IndexScopedSettings( - Settings.EMPTY, Collections.singleton(Setting.boolSetting("boo", true, false, Setting.Scope.INDEX))); + Settings.EMPTY, Collections.singleton(Setting.groupSetting("index.", Property.IndexScope))); + try { + new IndexScopedSettings( + Settings.EMPTY, Collections.singleton(Setting.boolSetting("index.", true, Property.IndexScope))); + fail(); + } catch (IllegalArgumentException e) { + assertEquals("illegal settings key: [index.]", e.getMessage()); + } + new IndexScopedSettings( + Settings.EMPTY, Collections.singleton(Setting.boolSetting("index.boo", true, Property.IndexScope))); + + new ClusterSettings( + Settings.EMPTY, Collections.singleton(Setting.boolSetting("index.boo", true, Property.NodeScope))); } public void testLoggingUpdates() { @@ -306,9 +336,9 @@ public class ScopedSettingsTests extends ESTestCase { public void testOverlappingComplexMatchSettings() { Set> settings = new LinkedHashSet<>(2); final boolean groupFirst = randomBoolean(); - final Setting groupSetting = Setting.groupSetting("foo.", false, Setting.Scope.CLUSTER); - final Setting listSetting = Setting.listSetting("foo.bar", Collections.emptyList(), Function.identity(), false, - Setting.Scope.CLUSTER); + final Setting groupSetting = Setting.groupSetting("foo.", Property.NodeScope); + final Setting listSetting = + Setting.listSetting("foo.bar", Collections.emptyList(), Function.identity(), Property.NodeScope); settings.add(groupFirst ? groupSetting : listSetting); settings.add(groupFirst ? 
listSetting : groupSetting); diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index df2014f7855..14fdcb1e0ac 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.settings; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -28,24 +29,29 @@ import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; public class SettingTests extends ESTestCase { public void testGet() { - Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); + Setting booleanSetting = Setting.boolSetting("foo.bar", false, Property.Dynamic, Property.NodeScope); assertFalse(booleanSetting.get(Settings.EMPTY)); assertFalse(booleanSetting.get(Settings.builder().put("foo.bar", false).build())); assertTrue(booleanSetting.get(Settings.builder().put("foo.bar", true).build())); } public void testByteSize() { - Setting byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), true, Setting.Scope.CLUSTER); + Setting byteSizeValueSetting = + Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), Property.Dynamic, Property.NodeScope); assertFalse(byteSizeValueSetting.isGroupSetting()); ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); assertEquals(byteSizeValue.bytes(), 1024); - byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", s -> "2048b", true, Setting.Scope.CLUSTER); + byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", s -> "2048b", Property.Dynamic, Property.NodeScope); byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); assertEquals(byteSizeValue.bytes(), 2048); @@ -64,7 +70,7 @@ public class SettingTests extends ESTestCase { } public void testSimpleUpdate() { - Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); + Setting booleanSetting = Setting.boolSetting("foo.bar", false, Property.Dynamic, Property.NodeScope); AtomicReference atomicBoolean = new AtomicReference<>(null); ClusterSettings.SettingUpdater settingUpdater = booleanSetting.newUpdater(atomicBoolean::set, logger); Settings build = Settings.builder().put("foo.bar", false).build(); @@ -85,7 +91,7 @@ public class SettingTests extends ESTestCase { } public void testUpdateNotDynamic() { - Setting booleanSetting = Setting.boolSetting("foo.bar", false, false, Setting.Scope.CLUSTER); + Setting booleanSetting = Setting.boolSetting("foo.bar", false, Property.NodeScope); assertFalse(booleanSetting.isGroupSetting()); AtomicReference atomicBoolean = new AtomicReference<>(null); try { @@ -97,7 +103,7 @@ public class SettingTests extends ESTestCase { } public void testUpdaterIsIsolated() { - Setting booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); + Setting booleanSetting = Setting.boolSetting("foo.bar", false, Property.Dynamic, Property.NodeScope); AtomicReference ab1 = new AtomicReference<>(null); AtomicReference ab2 = new 
AtomicReference<>(null); ClusterSettings.SettingUpdater settingUpdater = booleanSetting.newUpdater(ab1::set, logger); @@ -108,24 +114,28 @@ public class SettingTests extends ESTestCase { public void testDefault() { TimeValue defautlValue = TimeValue.timeValueMillis(randomIntBetween(0, 1000000)); - Setting setting = Setting.positiveTimeSetting("my.time.value", defautlValue, randomBoolean(), Setting.Scope.CLUSTER); + Setting setting = + Setting.positiveTimeSetting("my.time.value", defautlValue, Property.NodeScope); assertFalse(setting.isGroupSetting()); String aDefault = setting.getDefaultRaw(Settings.EMPTY); assertEquals(defautlValue.millis() + "ms", aDefault); assertEquals(defautlValue.millis(), setting.get(Settings.EMPTY).millis()); assertEquals(defautlValue, setting.getDefault(Settings.EMPTY)); - Setting secondaryDefault = new Setting<>("foo.bar", (s) -> s.get("old.foo.bar", "some_default"), (s) -> s, randomBoolean(), Setting.Scope.CLUSTER); + Setting secondaryDefault = + new Setting<>("foo.bar", (s) -> s.get("old.foo.bar", "some_default"), Function.identity(), Property.NodeScope); assertEquals("some_default", secondaryDefault.get(Settings.EMPTY)); assertEquals("42", secondaryDefault.get(Settings.builder().put("old.foo.bar", 42).build())); - Setting secondaryDefaultViaSettings = new Setting<>("foo.bar", secondaryDefault, (s) -> s, randomBoolean(), Setting.Scope.CLUSTER); + Setting secondaryDefaultViaSettings = + new Setting<>("foo.bar", secondaryDefault, Function.identity(), Property.NodeScope); assertEquals("some_default", secondaryDefaultViaSettings.get(Settings.EMPTY)); assertEquals("42", secondaryDefaultViaSettings.get(Settings.builder().put("old.foo.bar", 42).build())); } public void testComplexType() { AtomicReference ref = new AtomicReference<>(null); - Setting setting = new Setting<>("foo.bar", (s) -> "", (s) -> new ComplexType(s), true, Setting.Scope.CLUSTER); + Setting setting = new Setting<>("foo.bar", (s) -> "", (s) -> new ComplexType(s), + Property.Dynamic, Property.NodeScope); assertFalse(setting.isGroupSetting()); ref.set(setting.get(Settings.EMPTY)); ComplexType type = ref.get(); @@ -146,15 +156,17 @@ public class SettingTests extends ESTestCase { } public void testType() { - Setting integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.CLUSTER); - assertEquals(integerSetting.getScope(), Setting.Scope.CLUSTER); - integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.INDEX); - assertEquals(integerSetting.getScope(), Setting.Scope.INDEX); + Setting integerSetting = Setting.intSetting("foo.int.bar", 1, Property.Dynamic, Property.NodeScope); + assertThat(integerSetting.hasNodeScope(), is(true)); + assertThat(integerSetting.hasIndexScope(), is(false)); + integerSetting = Setting.intSetting("foo.int.bar", 1, Property.Dynamic, Property.IndexScope); + assertThat(integerSetting.hasIndexScope(), is(true)); + assertThat(integerSetting.hasNodeScope(), is(false)); } public void testGroups() { AtomicReference ref = new AtomicReference<>(null); - Setting setting = Setting.groupSetting("foo.bar.", true, Setting.Scope.CLUSTER); + Setting setting = Setting.groupSetting("foo.bar.", Property.Dynamic, Property.NodeScope); assertTrue(setting.isGroupSetting()); ClusterSettings.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger); @@ -232,8 +244,8 @@ public class SettingTests extends ESTestCase { public void testComposite() { Composite c = new Composite(); - Setting a = Setting.intSetting("foo.int.bar.a", 1, true, Setting.Scope.CLUSTER); - 
Setting b = Setting.intSetting("foo.int.bar.b", 1, true, Setting.Scope.CLUSTER); + Setting a = Setting.intSetting("foo.int.bar.a", 1, Property.Dynamic, Property.NodeScope); + Setting b = Setting.intSetting("foo.int.bar.b", 1, Property.Dynamic, Property.NodeScope); ClusterSettings.SettingUpdater> settingUpdater = Setting.compoundUpdater(c::set, a, b, logger); assertFalse(settingUpdater.apply(Settings.EMPTY, Settings.EMPTY)); assertNull(c.a); @@ -261,7 +273,8 @@ public class SettingTests extends ESTestCase { } public void testListSettings() { - Setting> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), true, Setting.Scope.CLUSTER); + Setting> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), + Property.Dynamic, Property.NodeScope); List value = listSetting.get(Settings.EMPTY); assertEquals(1, value.size()); assertEquals("foo,bar", value.get(0)); @@ -300,7 +313,8 @@ public class SettingTests extends ESTestCase { assertEquals(1, ref.get().size()); assertEquals("foo,bar", ref.get().get(0)); - Setting> otherSettings = Setting.listSetting("foo.bar", Collections.emptyList(), Integer::parseInt, true, Setting.Scope.CLUSTER); + Setting> otherSettings = Setting.listSetting("foo.bar", Collections.emptyList(), Integer::parseInt, + Property.Dynamic, Property.NodeScope); List defaultValue = otherSettings.get(Settings.EMPTY); assertEquals(0, defaultValue.size()); List intValues = otherSettings.get(Settings.builder().put("foo.bar", "0,1,2,3").build()); @@ -309,7 +323,8 @@ public class SettingTests extends ESTestCase { assertEquals(i, intValues.get(i).intValue()); } - Setting> settingWithFallback = Setting.listSetting("foo.baz", listSetting, s -> s, true, Setting.Scope.CLUSTER); + Setting> settingWithFallback = Setting.listSetting("foo.baz", listSetting, Function.identity(), + Property.Dynamic, Property.NodeScope); value = settingWithFallback.get(Settings.EMPTY); assertEquals(1, value.size()); assertEquals("foo,bar", value.get(0)); @@ -331,7 +346,8 @@ public class SettingTests extends ESTestCase { } public void testListSettingAcceptsNumberSyntax() { - Setting> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), true, Setting.Scope.CLUSTER); + Setting> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), + Property.Dynamic, Property.NodeScope); List input = Arrays.asList("test", "test1, test2", "test", ",,,,"); Settings.Builder builder = Settings.builder().putArray("foo.bar", input.toArray(new String[0])); // try to parse this really annoying format @@ -348,8 +364,8 @@ public class SettingTests extends ESTestCase { assertTrue(listSetting.match("foo.bar." 
+ randomIntBetween(0,10000))); } - public void testPrefixKeySetting() { - Setting setting = Setting.prefixKeySetting("foo.", "false", Boolean::parseBoolean, false, Setting.Scope.CLUSTER); + public void testDynamicKeySetting() { + Setting setting = Setting.prefixKeySetting("foo.", "false", Boolean::parseBoolean, Property.NodeScope); assertTrue(setting.hasComplexMatcher()); assertTrue(setting.match("foo.bar")); assertFalse(setting.match("foo")); @@ -366,7 +382,8 @@ public class SettingTests extends ESTestCase { } public void testAdfixKeySetting() { - Setting setting = Setting.adfixKeySetting("foo", "enable", "false", Boolean::parseBoolean, false, Setting.Scope.CLUSTER); + Setting setting = + Setting.adfixKeySetting("foo", "enable", "false", Boolean::parseBoolean, Property.NodeScope); assertTrue(setting.hasComplexMatcher()); assertTrue(setting.match("foo.bar.enable")); assertTrue(setting.match("foo.baz.enable")); @@ -387,7 +404,7 @@ public class SettingTests extends ESTestCase { } public void testMinMaxInt() { - Setting integerSetting = Setting.intSetting("foo.bar", 1, 0, 10, false, Setting.Scope.CLUSTER); + Setting integerSetting = Setting.intSetting("foo.bar", 1, 0, 10, Property.NodeScope); try { integerSetting.get(Settings.builder().put("foo.bar", 11).build()); fail(); @@ -405,4 +422,39 @@ public class SettingTests extends ESTestCase { assertEquals(5, integerSetting.get(Settings.builder().put("foo.bar", 5).build()).intValue()); assertEquals(1, integerSetting.get(Settings.EMPTY).intValue()); } + + /** + * Only one single scope can be added to any setting + */ + public void testMutuallyExclusiveScopes() { + // Those should pass + Setting setting = Setting.simpleString("foo.bar", Property.NodeScope); + assertThat(setting.hasNodeScope(), is(true)); + assertThat(setting.hasIndexScope(), is(false)); + setting = Setting.simpleString("foo.bar", Property.IndexScope); + assertThat(setting.hasIndexScope(), is(true)); + assertThat(setting.hasNodeScope(), is(false)); + + // We accept settings with no scope but they will be rejected when we register with SettingsModule.registerSetting + setting = Setting.simpleString("foo.bar"); + assertThat(setting.hasIndexScope(), is(false)); + assertThat(setting.hasNodeScope(), is(false)); + + // We accept settings with multiple scopes but they will be rejected when we register with SettingsModule.registerSetting + setting = Setting.simpleString("foo.bar", Property.IndexScope, Property.NodeScope); + assertThat(setting.hasIndexScope(), is(true)); + assertThat(setting.hasNodeScope(), is(true)); + } + + /** + * We can't have Null properties + */ + public void testRejectNullProperties() { + try { + Setting.simpleString("foo.bar", (Property[]) null); + fail(); + } catch (IllegalArgumentException ex) { + assertThat(ex.getMessage(), containsString("properties can not be null for setting")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java index 4f790c2d3a9..e9694981539 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java @@ -20,6 +20,10 @@ package org.elasticsearch.common.settings; import org.elasticsearch.common.inject.ModuleTestCase; +import org.elasticsearch.common.settings.Setting.Property; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; public class SettingsModuleTests 
extends ModuleTestCase { @@ -45,13 +49,13 @@ public class SettingsModuleTests extends ModuleTestCase { { Settings settings = Settings.builder().put("some.custom.setting", "2.0").build(); SettingsModule module = new SettingsModule(settings); - module.registerSetting(Setting.floatSetting("some.custom.setting", 1.0f, false, Setting.Scope.CLUSTER)); + module.registerSetting(Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)); assertInstanceBinding(module, Settings.class, (s) -> s == settings); } { Settings settings = Settings.builder().put("some.custom.setting", "false").build(); SettingsModule module = new SettingsModule(settings); - module.registerSetting(Setting.floatSetting("some.custom.setting", 1.0f, false, Setting.Scope.CLUSTER)); + module.registerSetting(Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)); try { assertInstanceBinding(module, Settings.class, (s) -> s == settings); fail(); @@ -131,12 +135,11 @@ public class SettingsModuleTests extends ModuleTestCase { public void testRegisterSettingsFilter() { Settings settings = Settings.builder().put("foo.bar", "false").put("bar.foo", false).put("bar.baz", false).build(); SettingsModule module = new SettingsModule(settings); - module.registerSetting(Setting.boolSetting("foo.bar", true, false, Setting.Scope.CLUSTER)); - module.registerSetting(Setting.boolSetting("bar.foo", true, false, Setting.Scope.CLUSTER)); - module.registerSetting(Setting.boolSetting("bar.baz", true, false, Setting.Scope.CLUSTER)); + module.registerSetting(Setting.boolSetting("foo.bar", true, Property.NodeScope)); + module.registerSetting(Setting.boolSetting("bar.foo", true, Property.NodeScope, Property.Filtered)); + module.registerSetting(Setting.boolSetting("bar.baz", true, Property.NodeScope)); module.registerSettingsFilter("foo.*"); - module.registerSettingsFilterIfMissing("bar.foo"); try { module.registerSettingsFilter("bar.foo"); fail(); @@ -149,4 +152,34 @@ public class SettingsModuleTests extends ModuleTestCase { assertInstanceBinding(module, SettingsFilter.class, (s) -> s.filter(settings).getAsMap().get("bar.baz").equals("false")); } + + public void testMutuallyExclusiveScopes() { + new SettingsModule(Settings.EMPTY).registerSetting(Setting.simpleString("foo.bar", Property.NodeScope)); + new SettingsModule(Settings.EMPTY).registerSetting(Setting.simpleString("foo.bar", Property.IndexScope)); + + // Those should fail + try { + new SettingsModule(Settings.EMPTY).registerSetting(Setting.simpleString("foo.bar")); + fail("No scope should fail"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("No scope found for setting")); + } + // Some settings have both scopes - that's fine too if they have per-node defaults + SettingsModule module = new SettingsModule(Settings.EMPTY); + module.registerSetting(Setting.simpleString("foo.bar", Property.IndexScope, Property.NodeScope)); + + try { + module.registerSetting(Setting.simpleString("foo.bar", Property.NodeScope)); + fail("already registered"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("Cannot register setting [foo.bar] twice")); + } + + try { + module.registerSetting(Setting.simpleString("foo.bar", Property.IndexScope)); + fail("already registered"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("Cannot register setting [foo.bar] twice")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java 
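Worth calling out from `SettingsModuleTests.testMutuallyExclusiveScopes` above: a `Setting` may be constructed with zero or several scopes, but `SettingsModule.registerSetting` is where the rules are actually enforced. A sketch of the observable behavior (keys are illustrative):

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;

public class RegistrationRulesSketch {
    public static void main(String[] args) {
        SettingsModule module = new SettingsModule(Settings.EMPTY);

        // Fine: exactly one scope.
        module.registerSetting(Setting.simpleString("node.only", Property.NodeScope));

        // Rejected at registration time, not at construction time:
        try {
            module.registerSetting(Setting.simpleString("no.scope.at.all"));
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // No scope found for setting ...
        }

        // Registering the same key twice is also rejected.
        try {
            module.registerSetting(Setting.simpleString("node.only", Property.NodeScope));
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // Cannot register setting [node.only] twice
        }
    }
}
```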
b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java new file mode 100644 index 00000000000..b158b961d9a --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java @@ -0,0 +1,345 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.util; + +import org.apache.lucene.util.CollectionUtil; +import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestUtil; +import org.elasticsearch.Version; +import org.elasticsearch.bwcompat.OldIndexBackwardsCompatibilityIT; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.AllocationId; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.gateway.MetaDataStateFormat; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.shard.ShardStateMetaData; +import org.elasticsearch.test.ESTestCase; + +import java.io.BufferedWriter; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; + +@LuceneTestCase.SuppressFileSystems("ExtrasFS") +public class IndexFolderUpgraderTests extends ESTestCase { + + /** + * tests custom data paths are upgraded + */ + public void testUpgradeCustomDataPath() throws IOException { + Path customPath = createTempDir(); + final Settings nodeSettings = Settings.builder() + .put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()) + .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build(); + try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { + final Index index = new Index(randomAsciiOfLength(10), Strings.randomBase64UUID()); + Settings settings = Settings.builder() + .put(nodeSettings) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 
randomIntBetween(1, 5)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build(); + int numIdxFiles = randomIntBetween(1, 5); + int numTranslogFiles = randomIntBetween(1, 5); + IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings); + writeIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles); + IndexFolderUpgrader helper = new IndexFolderUpgrader(settings, nodeEnv); + helper.upgrade(indexSettings.getIndex().getName()); + checkIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles); + } + } + + /** + * tests upgrade on partially upgraded index, when we crash while upgrading + */ + public void testPartialUpgradeCustomDataPath() throws IOException { + Path customPath = createTempDir(); + final Settings nodeSettings = Settings.builder() + .put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()) + .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build(); + try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { + final Index index = new Index(randomAsciiOfLength(10), Strings.randomBase64UUID()); + Settings settings = Settings.builder() + .put(nodeSettings) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build(); + int numIdxFiles = randomIntBetween(1, 5); + int numTranslogFiles = randomIntBetween(1, 5); + IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings); + writeIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles); + IndexFolderUpgrader helper = new IndexFolderUpgrader(settings, nodeEnv) { + @Override + void upgrade(Index index, Path source, Path target) throws IOException { + if(randomBoolean()) { + throw new FileNotFoundException("simulated"); + } + } + }; + // only upgrade some paths + try { + helper.upgrade(index.getName()); + } catch (IOException e) { + assertTrue(e instanceof FileNotFoundException); + } + helper = new IndexFolderUpgrader(settings, nodeEnv); + // try to upgrade again + helper.upgrade(indexSettings.getIndex().getName()); + checkIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles); + } + } + + public void testUpgrade() throws IOException { + final Settings nodeSettings = Settings.builder() + .put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build(); + try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { + final Index index = new Index(randomAsciiOfLength(10), Strings.randomBase64UUID()); + Settings settings = Settings.builder() + .put(nodeSettings) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build(); + int numIdxFiles = randomIntBetween(1, 5); + int numTranslogFiles = randomIntBetween(1, 5); + IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings); + writeIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles); 
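`testPartialUpgradeCustomDataPath` above encodes an important property: the folder upgrade must be resumable after a crash, so the test injects a `FileNotFoundException` mid-upgrade and then re-runs a fresh upgrader over the partial state. A generic sketch of that fault-injection shape, independent of the upgrader internals (all names are illustrative):

```java
import java.io.FileNotFoundException;
import java.io.IOException;

public class CrashRetrySketch {
    static class Upgrader {
        void upgradeStep() throws IOException { /* move one folder */ }
        void upgradeAll() throws IOException { upgradeStep(); /* ... remaining steps ... */ }
    }

    public static void main(String[] args) throws IOException {
        // 1. Run with injected failures: some steps may complete, one "crashes".
        Upgrader faulty = new Upgrader() {
            @Override
            void upgradeStep() throws IOException {
                if (Math.random() < 0.5) {
                    throw new FileNotFoundException("simulated");
                }
            }
        };
        try {
            faulty.upgradeAll();
        } catch (FileNotFoundException expected) {
            // on-disk state is now partial, as after a real crash
        }

        // 2. A fresh upgrader over the partial state must converge to the same
        //    end result, i.e. the upgrade has to be idempotent and resumable.
        new Upgrader().upgradeAll();
    }
}
```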
+ IndexFolderUpgrader helper = new IndexFolderUpgrader(settings, nodeEnv); + helper.upgrade(indexSettings.getIndex().getName()); + checkIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles); + } + } + + public void testUpgradeIndices() throws IOException { + final Settings nodeSettings = Settings.builder() + .put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build(); + try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { + Map> indexSettingsMap = new HashMap<>(); + for (int i = 0; i < randomIntBetween(2, 5); i++) { + final Index index = new Index(randomAsciiOfLength(10), Strings.randomBase64UUID()); + Settings settings = Settings.builder() + .put(nodeSettings) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build(); + Tuple fileCounts = new Tuple<>(randomIntBetween(1, 5), randomIntBetween(1, 5)); + IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings); + indexSettingsMap.put(indexSettings, fileCounts); + writeIndex(nodeEnv, indexSettings, fileCounts.v1(), fileCounts.v2()); + } + IndexFolderUpgrader.upgradeIndicesIfNeeded(nodeSettings, nodeEnv); + for (Map.Entry> entry : indexSettingsMap.entrySet()) { + checkIndex(nodeEnv, entry.getKey(), entry.getValue().v1(), entry.getValue().v2()); + } + } + } + + /** + * Run upgrade on a real bwc index + */ + public void testUpgradeRealIndex() throws IOException, URISyntaxException { + List indexes = new ArrayList<>(); + try (DirectoryStream stream = Files.newDirectoryStream(getBwcIndicesPath(), "index-*.zip")) { + for (Path path : stream) { + indexes.add(path); + } + } + CollectionUtil.introSort(indexes, (o1, o2) -> o1.getFileName().compareTo(o2.getFileName())); + final Path path = randomFrom(indexes); + final String indexName = path.getFileName().toString().replace(".zip", "").toLowerCase(Locale.ROOT); + try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) { + Path unzipDir = createTempDir(); + Path unzipDataDir = unzipDir.resolve("data"); + // decompress the index + try (InputStream stream = Files.newInputStream(path)) { + TestUtil.unzip(stream, unzipDir); + } + // check it is unique + assertTrue(Files.exists(unzipDataDir)); + Path[] list = FileSystemUtils.files(unzipDataDir); + if (list.length != 1) { + throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length); + } + // the bwc scripts packs the indices under this path + Path src = list[0].resolve("nodes/0/indices/" + indexName); + assertTrue("[" + path + "] missing index dir: " + src.toString(), Files.exists(src)); + final Path indicesPath = randomFrom(nodeEnvironment.nodePaths()).indicesPath; + logger.info("--> injecting index [{}] into [{}]", indexName, indicesPath); + OldIndexBackwardsCompatibilityIT.copyIndex(logger, src, indexName, indicesPath); + IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnvironment); + + // ensure old index folder is deleted + Set indexFolders = nodeEnvironment.availableIndexFolders(); + assertEquals(indexFolders.size(), 1); + + // ensure index metadata is moved + IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, + nodeEnvironment.resolveIndexFolder(indexFolders.iterator().next())); + assertNotNull(indexMetaData); + 
Index index = indexMetaData.getIndex(); + assertEquals(index.getName(), indexName); + + Set shardIds = nodeEnvironment.findAllShardIds(index); + // ensure all shards are moved + assertEquals(shardIds.size(), indexMetaData.getNumberOfShards()); + for (ShardId shardId : shardIds) { + final ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnvironment, shardId, + new IndexSettings(indexMetaData, Settings.EMPTY)); + final Path translog = shardPath.resolveTranslog(); + final Path idx = shardPath.resolveIndex(); + final Path state = shardPath.getShardStatePath().resolve(MetaDataStateFormat.STATE_DIR_NAME); + assertTrue(shardPath.exists()); + assertTrue(Files.exists(translog)); + assertTrue(Files.exists(idx)); + assertTrue(Files.exists(state)); + } + } + } + + public void testNeedsUpgrade() throws IOException { + final Index index = new Index("foo", Strings.randomBase64UUID()); + IndexMetaData indexState = IndexMetaData.builder(index.getName()) + .settings(Settings.builder() + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) { + IndexMetaData.FORMAT.write(indexState, 1, nodeEnvironment.indexPaths(index)); + assertFalse(IndexFolderUpgrader.needsUpgrade(index, index.getUUID())); + } + } + + private void checkIndex(NodeEnvironment nodeEnv, IndexSettings indexSettings, + int numIdxFiles, int numTranslogFiles) throws IOException { + final Index index = indexSettings.getIndex(); + // ensure index state can be loaded + IndexMetaData loadLatestState = IndexMetaData.FORMAT.loadLatestState(logger, nodeEnv.indexPaths(index)); + assertNotNull(loadLatestState); + assertEquals(loadLatestState.getIndex(), index); + for (int shardId = 0; shardId < indexSettings.getNumberOfShards(); shardId++) { + // ensure shard path can be loaded + ShardPath targetShardPath = ShardPath.loadShardPath(logger, nodeEnv, new ShardId(index, shardId), indexSettings); + assertNotNull(targetShardPath); + // ensure shard contents are copied over + final Path translog = targetShardPath.resolveTranslog(); + final Path idx = targetShardPath.resolveIndex(); + + // ensure index and translog files are copied over + assertEquals(numTranslogFiles, FileSystemUtils.files(translog).length); + assertEquals(numIdxFiles, FileSystemUtils.files(idx).length); + Path[] files = FileSystemUtils.files(translog); + final HashSet translogFiles = new HashSet<>(Arrays.asList(files)); + for (int i = 0; i < numTranslogFiles; i++) { + final String name = Integer.toString(i); + translogFiles.contains(translog.resolve(name + ".translog")); + byte[] content = Files.readAllBytes(translog.resolve(name + ".translog")); + assertEquals(name , new String(content, StandardCharsets.UTF_8)); + } + Path[] indexFileList = FileSystemUtils.files(idx); + final HashSet idxFiles = new HashSet<>(Arrays.asList(indexFileList)); + for (int i = 0; i < numIdxFiles; i++) { + final String name = Integer.toString(i); + idxFiles.contains(idx.resolve(name + ".tst")); + byte[] content = Files.readAllBytes(idx.resolve(name + ".tst")); + assertEquals(name, new String(content, StandardCharsets.UTF_8)); + } + } + } + + private void writeIndex(NodeEnvironment nodeEnv, IndexSettings indexSettings, + int numIdxFiles, int numTranslogFiles) throws IOException { + NodeEnvironment.NodePath[] nodePaths = nodeEnv.nodePaths(); + Path[] oldIndexPaths = new Path[nodePaths.length]; + for (int i = 0; i < 
nodePaths.length; i++) { + oldIndexPaths[i] = nodePaths[i].indicesPath.resolve(indexSettings.getIndex().getName()); + } + IndexMetaData.FORMAT.write(indexSettings.getIndexMetaData(), 1, oldIndexPaths); + for (int id = 0; id < indexSettings.getNumberOfShards(); id++) { + Path oldIndexPath = randomFrom(oldIndexPaths); + ShardId shardId = new ShardId(indexSettings.getIndex(), id); + if (indexSettings.hasCustomDataPath()) { + Path customIndexPath = nodeEnv.resolveBaseCustomLocation(indexSettings).resolve(indexSettings.getIndex().getName()); + writeShard(shardId, customIndexPath, numIdxFiles, numTranslogFiles); + } else { + writeShard(shardId, oldIndexPath, numIdxFiles, numTranslogFiles); + } + ShardStateMetaData state = new ShardStateMetaData(true, indexSettings.getUUID(), AllocationId.newInitializing()); + ShardStateMetaData.FORMAT.write(state, 1, oldIndexPath.resolve(String.valueOf(shardId.getId()))); + } + } + + private void writeShard(ShardId shardId, Path indexLocation, + final int numIdxFiles, final int numTranslogFiles) throws IOException { + Path oldShardDataPath = indexLocation.resolve(String.valueOf(shardId.getId())); + final Path translogPath = oldShardDataPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME); + final Path idxPath = oldShardDataPath.resolve(ShardPath.INDEX_FOLDER_NAME); + Files.createDirectories(translogPath); + Files.createDirectories(idxPath); + for (int i = 0; i < numIdxFiles; i++) { + String filename = Integer.toString(i); + try (BufferedWriter w = Files.newBufferedWriter(idxPath.resolve(filename + ".tst"), + StandardCharsets.UTF_8)) { + w.write(filename); + } + } + for (int i = 0; i < numTranslogFiles; i++) { + String filename = Integer.toString(i); + try (BufferedWriter w = Files.newBufferedWriter(translogPath.resolve(filename + ".translog"), + StandardCharsets.UTF_8)) { + w.write(filename); + } + } + } +} diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java index cbf58bf9daa..1a582d48f6b 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java @@ -27,6 +27,8 @@ import java.io.IOException; import java.util.Collections; import java.util.HashMap; +import static org.hamcrest.Matchers.sameInstance; + public class ThreadContextTests extends ESTestCase { public void testStashContext() { @@ -235,4 +237,71 @@ public class ThreadContextTests extends ESTestCase { } } + public void testPreserveContext() throws IOException { + try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) { + Runnable withContext; + + // Create a runnable that should run with some header + try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { + threadContext.putHeader("foo", "bar"); + withContext = threadContext.preserveContext(sometimesAbstractRunnable(() -> { + assertEquals("bar", threadContext.getHeader("foo")); + })); + } + + // We don't see the header outside of the runnable + assertNull(threadContext.getHeader("foo")); + + // But we do inside of it + withContext.run(); + } + } + + public void testPreserveContextKeepsOriginalContextWhenCalledTwice() throws IOException { + try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) { + Runnable originalWithContext; + Runnable withContext; + + // Create a runnable that should run with some header + try (ThreadContext.StoredContext ignored = 
threadContext.stashContext()) { + threadContext.putHeader("foo", "bar"); + withContext = threadContext.preserveContext(sometimesAbstractRunnable(() -> { + assertEquals("bar", threadContext.getHeader("foo")); + })); + } + + // Now attempt to rewrap it + originalWithContext = withContext; + try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { + threadContext.putHeader("foo", "zot"); + withContext = threadContext.preserveContext(withContext); + } + + // We get the original context inside the runnable + withContext.run(); + + // In fact the second wrapping didn't even change it + assertThat(withContext, sameInstance(originalWithContext)); + } + } + + /** + * Sometimes wraps a Runnable in an AbstractRunnable. + */ + private Runnable sometimesAbstractRunnable(Runnable r) { + if (random().nextBoolean()) { + return r; + } + return new AbstractRunnable() { + @Override + public void onFailure(Throwable t) { + throw new RuntimeException(t); + } + + @Override + protected void doRun() throws Exception { + r.run(); + } + }; + } } diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java new file mode 100644 index 00000000000..cce349f417c --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
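The new `ThreadContext.preserveContext` tests above are easiest to read as a capture-and-restore pattern: the returned `Runnable` carries the headers that were in scope when it was wrapped, and rewrapping an already-wrapped runnable is a no-op. A sketch using only the calls exercised by the tests (`request-id`/`42` are illustrative values):

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;

public class PreserveContextSketch {
    public static void main(String[] args) {
        try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) {
            Runnable captured;
            try (ThreadContext.StoredContext ignored = threadContext.stashContext()) {
                threadContext.putHeader("request-id", "42");
                // Wrap while the header is in scope; the wrapper remembers this context.
                captured = threadContext.preserveContext(
                        () -> System.out.println(threadContext.getHeader("request-id"))); // prints 42
            }
            // Outside the stashed scope the header is gone ...
            System.out.println(threadContext.getHeader("request-id")); // prints null
            // ... but the captured runnable restores it for the duration of run().
            captured.run();
        }
    }
}
```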
+ */ + +package org.elasticsearch.common.xcontent; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; + +public class XContentParserTests extends ESTestCase { + + public void testReadList() throws IOException { + assertThat(readList("{\"foo\": [\"bar\"]}"), contains("bar")); + assertThat(readList("{\"foo\": [\"bar\",\"baz\"]}"), contains("bar", "baz")); + assertThat(readList("{\"foo\": [1, 2, 3], \"bar\": 4}"), contains(1, 2, 3)); + assertThat(readList("{\"foo\": [{\"bar\":1},{\"baz\":2},{\"qux\":3}]}"), hasSize(3)); + assertThat(readList("{\"foo\": [null]}"), contains(nullValue())); + assertThat(readList("{\"foo\": []}"), hasSize(0)); + assertThat(readList("{\"foo\": [1]}"), contains(1)); + assertThat(readList("{\"foo\": [1,2]}"), contains(1, 2)); + assertThat(readList("{\"foo\": [{},{},{},{}]}"), hasSize(4)); + } + + public void testReadListThrowsException() throws IOException { + // Calling XContentParser.list() or listOrderedMap() to read a simple + // value or object should throw an exception + assertReadListThrowsException("{\"foo\": \"bar\"}"); + assertReadListThrowsException("{\"foo\": 1, \"bar\": 2}"); + assertReadListThrowsException("{\"foo\": {\"bar\":\"baz\"}}"); + } + + @SuppressWarnings("unchecked") + private static List readList(String source) throws IOException { + try (XContentParser parser = XContentType.JSON.xContent().createParser(source)) { + XContentParser.Token token = parser.nextToken(); + assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); + token = parser.nextToken(); + assertThat(token, equalTo(XContentParser.Token.FIELD_NAME)); + assertThat(parser.currentName(), equalTo("foo")); + return (List) (randomBoolean() ? 
parser.listOrderedMap() : parser.list()); + } + } + + private void assertReadListThrowsException(String source) { + try { + readList(source); + fail("should have thrown a parse exception"); + } catch (Exception e) { + assertThat(e, instanceOf(ElasticsearchParseException.class)); + assertThat(e.getMessage(), containsString("Failed to parse list")); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java index 9129e3c05b3..36b16d6a176 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java @@ -166,7 +166,7 @@ public class XContentBuilderTests extends ESTestCase { byte[] data = bos.bytes().toBytes(); String sData = new String(data, "UTF8"); - System.out.println("DATA: " + sData); + assertThat(sData, equalTo("{\"name\":\"something\", source : { test : \"value\" },\"name2\":\"something2\"}")); } public void testFieldCaseConversion() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelIT.java b/core/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelIT.java index b696c445f30..46c027cb91c 100644 --- a/core/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelIT.java +++ b/core/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelIT.java @@ -40,7 +40,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase { prepareCreate("test", 1, Settings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 2)).execute().actionGet(); ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(1).setWaitForYellowStatus().execute().actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); @@ -60,7 +60,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase { allowNodes("test", 2); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(2).setWaitForYellowStatus().execute().actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); @@ -82,7 +82,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase { allowNodes("test", 3); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(3).setWaitForGreenStatus().execute().actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); diff --git a/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java b/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java index 9c702acb2c4..1455b397e74 100644 --- 
a/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java +++ b/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java @@ -22,7 +22,7 @@ package org.elasticsearch.deps.lucene; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; -import org.apache.lucene.document.IntField; +import org.apache.lucene.document.LegacyIntField; import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.DirectoryReader; @@ -51,7 +51,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.apache.lucene.util.NumericUtils; +import org.apache.lucene.util.LegacyNumericUtils; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.test.ESTestCase; @@ -74,9 +74,9 @@ public class SimpleLuceneTests extends ESTestCase { document.add(new SortedDocValuesField("str", new BytesRef(text))); indexWriter.addDocument(document); } - IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(indexWriter, true)); + IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(indexWriter)); IndexSearcher searcher = new IndexSearcher(reader); - TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), null, 10, new Sort(new SortField("str", SortField.Type.STRING))); + TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("str", SortField.Type.STRING))); for (int i = 0; i < 10; i++) { FieldDoc fieldDoc = (FieldDoc) docs.scoreDocs[i]; assertThat((BytesRef) fieldDoc.fields[0], equalTo(new BytesRef(new String(new char[]{(char) (97 + i), (char) (97 + i)})))); @@ -89,10 +89,10 @@ public class SimpleLuceneTests extends ESTestCase { Document document = new Document(); document.add(new TextField("_id", "1", Field.Store.YES)); - document.add(new IntField("test", 2, IntField.TYPE_STORED)); + document.add(new LegacyIntField("test", 2, LegacyIntField.TYPE_STORED)); indexWriter.addDocument(document); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); Document doc = searcher.doc(topDocs.scoreDocs[0].doc); @@ -100,7 +100,7 @@ public class SimpleLuceneTests extends ESTestCase { assertThat(f.stringValue(), equalTo("2")); BytesRefBuilder bytes = new BytesRefBuilder(); - NumericUtils.intToPrefixCoded(2, 0, bytes); + LegacyNumericUtils.intToPrefixCoded(2, 0, bytes); topDocs = searcher.search(new TermQuery(new Term("test", bytes.get())), 1); doc = searcher.doc(topDocs.scoreDocs[0].doc); f = doc.getField("test"); @@ -123,7 +123,7 @@ public class SimpleLuceneTests extends ESTestCase { document.add(new TextField("#id", "1", Field.Store.YES)); indexWriter.addDocument(document); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); final ArrayList fieldsOrder = new ArrayList<>(); @@ -162,7 +162,7 @@ public class SimpleLuceneTests extends ESTestCase { indexWriter.addDocument(document); } - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = 
DirectoryReader.open(indexWriter); IndexSearcher searcher = new IndexSearcher(reader); TermQuery query = new TermQuery(new Term("value", "value")); TopDocs topDocs = searcher.search(query, 100); @@ -179,7 +179,7 @@ public class SimpleLuceneTests extends ESTestCase { public void testNRTSearchOnClosedWriter() throws Exception { Directory dir = new RAMDirectory(); IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); - DirectoryReader reader = DirectoryReader.open(indexWriter, true); + DirectoryReader reader = DirectoryReader.open(indexWriter); for (int i = 0; i < 100; i++) { Document document = new Document(); @@ -205,26 +205,26 @@ public class SimpleLuceneTests extends ESTestCase { IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); Document doc = new Document(); - FieldType type = IntField.TYPE_NOT_STORED; - IntField field = new IntField("int1", 1, type); + FieldType type = LegacyIntField.TYPE_NOT_STORED; + LegacyIntField field = new LegacyIntField("int1", 1, type); doc.add(field); - type = new FieldType(IntField.TYPE_NOT_STORED); + type = new FieldType(LegacyIntField.TYPE_NOT_STORED); type.setIndexOptions(IndexOptions.DOCS_AND_FREQS); type.freeze(); - field = new IntField("int1", 1, type); + field = new LegacyIntField("int1", 1, type); doc.add(field); - field = new IntField("int2", 1, type); + field = new LegacyIntField("int2", 1, type); doc.add(field); - field = new IntField("int2", 1, type); + field = new LegacyIntField("int2", 1, type); doc.add(field); indexWriter.addDocument(doc); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); LeafReader atomicReader = SlowCompositeReaderWrapper.wrap(reader); Terms terms = atomicReader.terms("int1"); diff --git a/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java b/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java index 66dc0542678..fbb5115903c 100644 --- a/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java +++ b/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.deps.lucene; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; @@ -53,10 +54,14 @@ public class VectorHighlighterTests extends ESTestCase { Document document = new Document(); document.add(new TextField("_id", "1", Field.Store.YES)); - document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + FieldType vectorsType = new FieldType(TextField.TYPE_STORED); + vectorsType.setStoreTermVectors(true); + vectorsType.setStoreTermVectorPositions(true); + vectorsType.setStoreTermVectorOffsets(true); + document.add(new Field("content", "the big bad dog", vectorsType)); indexWriter.addDocument(document); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); @@ -75,10 +80,14 @@ public class VectorHighlighterTests extends ESTestCase { Document document = new Document(); document.add(new TextField("_id", "1", 
Field.Store.YES)); - document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + FieldType vectorsType = new FieldType(TextField.TYPE_STORED); + vectorsType.setStoreTermVectors(true); + vectorsType.setStoreTermVectorPositions(true); + vectorsType.setStoreTermVectorOffsets(true); + document.add(new Field("content", "the big bad dog", vectorsType)); indexWriter.addDocument(document); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); @@ -87,12 +96,12 @@ public class VectorHighlighterTests extends ESTestCase { FastVectorHighlighter highlighter = new FastVectorHighlighter(); PrefixQuery prefixQuery = new PrefixQuery(new Term("content", "ba")); - assertThat(prefixQuery.getRewriteMethod().getClass().getName(), equalTo(PrefixQuery.CONSTANT_SCORE_FILTER_REWRITE.getClass().getName())); + assertThat(prefixQuery.getRewriteMethod().getClass().getName(), equalTo(PrefixQuery.CONSTANT_SCORE_REWRITE.getClass().getName())); String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(prefixQuery), reader, topDocs.scoreDocs[0].doc, "content", 30); assertThat(fragment, nullValue()); - prefixQuery.setRewriteMethod(PrefixQuery.SCORING_BOOLEAN_QUERY_REWRITE); + prefixQuery.setRewriteMethod(PrefixQuery.SCORING_BOOLEAN_REWRITE); Query rewriteQuery = prefixQuery.rewrite(reader); fragment = highlighter.getBestFragment(highlighter.getFieldQuery(rewriteQuery), reader, topDocs.scoreDocs[0].doc, "content", 30); @@ -100,7 +109,7 @@ public class VectorHighlighterTests extends ESTestCase { // now check with the custom field query prefixQuery = new PrefixQuery(new Term("content", "ba")); - assertThat(prefixQuery.getRewriteMethod().getClass().getName(), equalTo(PrefixQuery.CONSTANT_SCORE_FILTER_REWRITE.getClass().getName())); + assertThat(prefixQuery.getRewriteMethod().getClass().getName(), equalTo(PrefixQuery.CONSTANT_SCORE_REWRITE.getClass().getName())); fragment = highlighter.getBestFragment(new CustomFieldQuery(prefixQuery, reader, highlighter), reader, topDocs.scoreDocs[0].doc, "content", 30); assertThat(fragment, notNullValue()); @@ -112,10 +121,14 @@ public class VectorHighlighterTests extends ESTestCase { Document document = new Document(); document.add(new TextField("_id", "1", Field.Store.YES)); - document.add(new Field("content", "the big bad dog", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + FieldType vectorsType = new FieldType(TextField.TYPE_NOT_STORED); + vectorsType.setStoreTermVectors(true); + vectorsType.setStoreTermVectorPositions(true); + vectorsType.setStoreTermVectorOffsets(true); + document.add(new Field("content", "the big bad dog", vectorsType)); indexWriter.addDocument(document); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); @@ -133,10 +146,10 @@ public class VectorHighlighterTests extends ESTestCase { Document document = new Document(); document.add(new TextField("_id", "1", Field.Store.YES)); - document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); + document.add(new TextField("content", "the big bad dog", 
Field.Store.YES)); indexWriter.addDocument(document); - IndexReader reader = DirectoryReader.open(indexWriter, true); + IndexReader reader = DirectoryReader.open(indexWriter); IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index b9d7107ed54..f588652ac8c 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -41,6 +40,7 @@ import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; @@ -177,13 +177,17 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } private void configureUnicastCluster(int numberOfNodes, @Nullable int[] unicastHostsOrdinals, int minimumMasterNode) throws ExecutionException, InterruptedException { + configureUnicastCluster(DEFAULT_SETTINGS, numberOfNodes, unicastHostsOrdinals, minimumMasterNode); + } + + private void configureUnicastCluster(Settings settings, int numberOfNodes, @Nullable int[] unicastHostsOrdinals, int minimumMasterNode) throws ExecutionException, InterruptedException { if (minimumMasterNode < 0) { minimumMasterNode = numberOfNodes / 2 + 1; } logger.info("---> configured unicast"); // TODO: Rarely use default settings form some of these Settings nodeSettings = Settings.builder() - .put(DEFAULT_SETTINGS) + .put(settings) .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNode) .build(); @@ -196,7 +200,6 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } } - /** * Test that no split brain occurs under partial network partition. See https://github.com/elastic/elasticsearch/issues/2488 */ @@ -205,7 +208,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // Figure out what is the elected master node final String masterNode = internalCluster().getMasterName(); - logger.info("---> legit elected master node=" + masterNode); + logger.info("---> legit elected master node={}", masterNode); // Pick a node that isn't the elected master. 
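A recurring cleanup in these test hunks replaces string concatenation in log calls with {} placeholders. The placeholder form defers message formatting until the logger has checked that the level is enabled, and it keeps the call site analyzable. A minimal sketch of the idiom, assuming a plain SLF4J Logger (an assumption for illustration; the tests here use Elasticsearch's own logger, which accepts the same placeholder syntax):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LogPlaceholderSketch {
        private static final Logger logger = LoggerFactory.getLogger(LogPlaceholderSketch.class);

        void reportMaster(String masterNode) {
            // Concatenation builds the full message string even when INFO is disabled:
            //   logger.info("---> legit elected master node=" + masterNode);
            // The parameterized form formats the message only if INFO is enabled:
            logger.info("---> legit elected master node={}", masterNode);
        }
    }

The logger.debug("{}", sb) rewrites further down serve the same purpose when a message has already been built up incrementally.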
Set nonMasters = new HashSet<>(nodes); @@ -493,7 +496,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } int docsPerIndexer = randomInt(3); - logger.info("indexing " + docsPerIndexer + " docs per indexer before partition"); + logger.info("indexing {} docs per indexer before partition", docsPerIndexer); countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size())); for (Semaphore semaphore : semaphores) { semaphore.release(docsPerIndexer); @@ -505,7 +508,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { disruptionScheme.startDisrupting(); docsPerIndexer = 1 + randomInt(5); - logger.info("indexing " + docsPerIndexer + " docs per indexer during partition"); + logger.info("indexing {} docs per indexer during partition", docsPerIndexer); countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size())); Collections.shuffle(semaphores, random()); for (Semaphore semaphore : semaphores) { @@ -536,11 +539,11 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } } finally { if (exceptedExceptions.size() > 0) { - StringBuilder sb = new StringBuilder("Indexing exceptions during disruption:"); + StringBuilder sb = new StringBuilder(); for (Exception e : exceptedExceptions) { sb.append("\n").append(e.getMessage()); } - logger.debug(sb.toString()); + logger.debug("Indexing exceptions during disruption: {}", sb); } logger.info("shutting down indexers"); stop.set(true); @@ -728,7 +731,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { IndexResponse indexResponse = internalCluster().client(notIsolatedNode).prepareIndex("test", "type").setSource("field", "value").get(); assertThat(indexResponse.getVersion(), equalTo(1L)); - logger.info("Verifying if document exists via node[" + notIsolatedNode + "]"); + logger.info("Verifying if document exists via node[{}]", notIsolatedNode); GetResponse getResponse = internalCluster().client(notIsolatedNode).prepareGet("test", "type", indexResponse.getId()) .setPreference("_local") .get(); @@ -742,7 +745,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { ensureGreen("test"); for (String node : nodes) { - logger.info("Verifying if document exists after isolating node[" + isolatedNode + "] via node[" + node + "]"); + logger.info("Verifying if document exists after isolating node[{}] via node[{}]", isolatedNode, node); getResponse = internalCluster().client(node).prepareGet("test", "type", indexResponse.getId()) .setPreference("_local") .get(); @@ -761,7 +764,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { List nodes = startCluster(4, -1, new int[]{0}); // Figure out what is the elected master node final String masterNode = internalCluster().getMasterName(); - logger.info("---> legit elected master node=" + masterNode); + logger.info("---> legit elected master node={}", masterNode); List otherNodes = new ArrayList<>(nodes); otherNodes.remove(masterNode); otherNodes.remove(nodes.get(0)); // <-- Don't isolate the node that is in the unicast endpoint for all the other nodes. @@ -1075,25 +1078,40 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { * Tests that indices are properly deleted even if there is a master transition in between. 
* Test for https://github.com/elastic/elasticsearch/issues/11665 */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/16890") public void testIndicesDeleted() throws Exception { - configureUnicastCluster(3, null, 2); + final Settings settings = Settings.builder() + .put(DEFAULT_SETTINGS) + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait on isolated data node + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // wait till cluster state is committed + .build(); + final String idxName = "test"; + configureUnicastCluster(settings, 3, null, 2); InternalTestCluster.Async> masterNodes = internalCluster().startMasterOnlyNodesAsync(2); InternalTestCluster.Async dataNode = internalCluster().startDataOnlyNodeAsync(); dataNode.get(); - masterNodes.get(); + final List allMasterEligibleNodes = masterNodes.get(); ensureStableCluster(3); assertAcked(prepareCreate("test")); ensureYellow(); - String masterNode1 = internalCluster().getMasterName(); + final String masterNode1 = internalCluster().getMasterName(); NetworkPartition networkPartition = new NetworkUnresponsivePartition(masterNode1, dataNode.get(), getRandom()); internalCluster().setDisruptionScheme(networkPartition); networkPartition.startDisrupting(); - internalCluster().client(masterNode1).admin().indices().prepareDelete("test").setTimeout("1s").get(); + // We know this will time out due to the partition, we check manually below to not proceed until + // the delete has been applied to the master node and the master eligible node. + internalCluster().client(masterNode1).admin().indices().prepareDelete(idxName).setTimeout("0s").get(); + // Don't restart the master node until we know the index deletion has taken effect on master and the master eligible node. 
+ assertBusy(() -> { + for (String masterNode : allMasterEligibleNodes) { + final ClusterState masterState = internalCluster().clusterService(masterNode).state(); + assertTrue("index not deleted on " + masterNode, masterState.metaData().hasIndex(idxName) == false && + masterState.status() == ClusterState.ClusterStateStatus.APPLIED); + } + }); internalCluster().restartNode(masterNode1, InternalTestCluster.EMPTY_CALLBACK); ensureYellow(); - assertFalse(client().admin().indices().prepareExists("test").get().isExists()); + assertFalse(client().admin().indices().prepareExists(idxName).get().isExists()); } protected NetworkPartition addRandomPartition() { diff --git a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java index 65484b81c79..b58354b77d1 100644 --- a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java @@ -24,13 +24,13 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.zen.fd.FaultDetection; import org.elasticsearch.discovery.zen.fd.MasterFaultDetection; import org.elasticsearch.discovery.zen.fd.NodesFaultDetection; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.cluster.NoopClusterService; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportConnectionListener; @@ -44,10 +44,13 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyMap; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.equalTo; public class ZenFaultDetectionTests extends ESTestCase { protected ThreadPool threadPool; + protected ClusterService clusterService; protected static final Version version0 = Version.fromId(/*0*/99); protected DiscoveryNode nodeA; @@ -62,6 +65,7 @@ public class ZenFaultDetectionTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); threadPool = new ThreadPool(getClass().getName()); + clusterService = createClusterService(threadPool); serviceA = build(Settings.builder().put("name", "TS_A").build(), version0); nodeA = new DiscoveryNode("TS_A", "TS_A", serviceA.boundAddress().publishAddress(), emptyMap(), version0); serviceB = build(Settings.builder().put("name", "TS_B").build(), version1); @@ -100,13 +104,14 @@ public class ZenFaultDetectionTests extends ESTestCase { super.tearDown(); serviceA.close(); serviceB.close(); + clusterService.close(); terminate(threadPool); } protected MockTransportService build(Settings settings, Version version) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(); MockTransportService transportService = new MockTransportService(Settings.EMPTY, - new LocalTransport(settings, threadPool, version, namedWriteableRegistry), threadPool, namedWriteableRegistry); + new LocalTransport(settings, threadPool, version, 
namedWriteableRegistry), threadPool); transportService.start(); transportService.acceptIncomingRequests(); return transportService; @@ -186,21 +191,18 @@ public class ZenFaultDetectionTests extends ESTestCase { .put(FaultDetection.PING_INTERVAL_SETTING.getKey(), "5m"); ClusterName clusterName = new ClusterName(randomAsciiOfLengthBetween(3, 20)); final ClusterState state = ClusterState.builder(clusterName).nodes(buildNodesForA(false)).build(); + setState(clusterService, state); MasterFaultDetection masterFD = new MasterFaultDetection(settings.build(), threadPool, serviceA, clusterName, - new NoopClusterService(state)); + clusterService); masterFD.start(nodeB, "test"); final String[] failureReason = new String[1]; final DiscoveryNode[] failureNode = new DiscoveryNode[1]; final CountDownLatch notified = new CountDownLatch(1); - masterFD.addListener(new MasterFaultDetection.Listener() { - - @Override - public void onMasterFailure(DiscoveryNode masterNode, Throwable cause, String reason) { - failureNode[0] = masterNode; - failureReason[0] = reason; - notified.countDown(); - } + masterFD.addListener((masterNode, cause, reason) -> { + failureNode[0] = masterNode; + failureReason[0] = reason; + notified.countDown(); }); // will raise a disconnect on A serviceB.stop(); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index 0ca261cbf65..96e7a90ece6 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; @@ -40,9 +41,12 @@ import org.elasticsearch.common.util.concurrent.BaseFuture; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.membership.MembershipAction; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.AfterClass; import org.junit.Before; +import org.junit.BeforeClass; import java.util.ArrayList; import java.util.Collections; @@ -55,28 +59,52 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @TestLogging("discovery.zen:TRACE") public class NodeJoinControllerTests extends ESTestCase { - private TestClusterService clusterService; + private static ThreadPool threadPool; + + 
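// Starting and tearing down a ThreadPool per test is comparatively expensive, so a
+ // single static pool is shared by the whole class and terminated once in afterClass() below.
+ 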
private ClusterService clusterService; private NodeJoinController nodeJoinController; + @BeforeClass + public static void beforeClass() { + threadPool = new ThreadPool("ShardReplicationTests"); + } + + @AfterClass + public static void afterClass() { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; + } + @Before public void setUp() throws Exception { super.setUp(); - clusterService = new TestClusterService(); + clusterService = createClusterService(threadPool); final DiscoveryNodes initialNodes = clusterService.state().nodes(); final DiscoveryNode localNode = initialNodes.localNode(); // make sure we have a master - clusterService.setState(ClusterState.builder(clusterService.state()).nodes(DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.id()))); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes( + DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.id()))); nodeJoinController = new NodeJoinController(clusterService, new NoopRoutingService(Settings.EMPTY), - new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), Settings.EMPTY); + new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + Settings.EMPTY); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); } public void testSimpleJoinAccumulation() throws InterruptedException, ExecutionException { @@ -97,21 +125,29 @@ public class NodeJoinControllerTests extends ESTestCase { pendingJoins.add(joinNodeAsync(node)); } nodeJoinController.stopAccumulatingJoins("test"); + boolean hadSyncJoin = false; for (int i = randomInt(5); i > 0; i--) { DiscoveryNode node = newNode(nodeId++); nodes.add(node); joinNode(node); + hadSyncJoin = true; + } + if (hadSyncJoin) { + for (Future joinFuture : pendingJoins) { + assertThat(joinFuture.isDone(), equalTo(true)); + } } - assertNodesInCurrentState(nodes); for (Future joinFuture : pendingJoins) { - assertThat(joinFuture.isDone(), equalTo(true)); + joinFuture.get(); } + + assertNodesInCurrentState(nodes); } public void testFailingJoinsWhenNotMaster() throws ExecutionException, InterruptedException { // remove current master flag DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterService.state().nodes()).masterNodeId(null); - clusterService.setState(ClusterState.builder(clusterService.state()).nodes(nodes)); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodes)); int nodeId = 0; try { joinNode(newNode(nodeId++)); @@ -142,7 +178,7 @@ public class NodeJoinControllerTests extends ESTestCase { public void testSimpleMasterElectionWithoutRequiredJoins() throws InterruptedException, ExecutionException { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterService.state().nodes()).masterNodeId(null); - clusterService.setState(ClusterState.builder(clusterService.state()).nodes(nodes)); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodes)); int nodeId = 0; final int requiredJoins = 0; logger.debug("--> using requiredJoins [{}]", requiredJoins); @@ -184,13 +220,13 @@ public class NodeJoinControllerTests extends ESTestCase { }); masterElection.start(); - logger.debug("--> requiredJoins is set to 0. verifying election finished"); - electionFuture.get(); + logger.debug("--> requiredJoins is set to 0. 
verifying election finished"); + electionFuture.get(); } public void testSimpleMasterElection() throws InterruptedException, ExecutionException { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterService.state().nodes()).masterNodeId(null); - clusterService.setState(ClusterState.builder(clusterService.state()).nodes(nodes)); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodes)); int nodeId = 0; final int requiredJoins = 1 + randomInt(5); logger.debug("--> using requiredJoins [{}]", requiredJoins); @@ -301,7 +337,7 @@ public class NodeJoinControllerTests extends ESTestCase { public void testMasterElectionTimeout() throws InterruptedException { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterService.state().nodes()).masterNodeId(null); - clusterService.setState(ClusterState.builder(clusterService.state()).nodes(nodes)); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodes)); int nodeId = 0; final int requiredJoins = 1 + randomInt(5); logger.debug("--> using requiredJoins [{}]", requiredJoins); @@ -367,7 +403,7 @@ public class NodeJoinControllerTests extends ESTestCase { final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(state.nodes()); final DiscoveryNode other_node = new DiscoveryNode("other_node", DummyTransportAddress.INSTANCE, Version.CURRENT); nodesBuilder.put(other_node); - clusterService.setState(ClusterState.builder(state).nodes(nodesBuilder)); + setState(clusterService, ClusterState.builder(state).nodes(nodesBuilder)); state = clusterService.state(); joinNode(other_node); @@ -413,7 +449,7 @@ public class NodeJoinControllerTests extends ESTestCase { public void testElectionWithConcurrentJoins() throws InterruptedException, BrokenBarrierException { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterService.state().nodes()).masterNodeId(null); - clusterService.setState(ClusterState.builder(clusterService.state()).nodes(nodesBuilder)); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodesBuilder)); nodeJoinController.startAccumulatingJoins(); @@ -492,7 +528,7 @@ public class NodeJoinControllerTests extends ESTestCase { static class NoopAllocationService extends AllocationService { public NoopAllocationService(Settings settings) { - super(settings, null, null, null); + super(settings, null, null, null, null); } @Override @@ -512,11 +548,13 @@ public class NodeJoinControllerTests extends ESTestCase { } protected void assertNodesInCurrentState(List expectedNodes) { - DiscoveryNodes discoveryNodes = clusterService.state().nodes(); - assertThat(discoveryNodes.prettyPrint() + "\nexpected: " + expectedNodes.toString(), discoveryNodes.size(), equalTo(expectedNodes.size())); + final ClusterState state = clusterService.state(); + logger.info("assert for [{}] in:\n{}", expectedNodes, state.prettyPrint()); + DiscoveryNodes discoveryNodes = state.nodes(); for (DiscoveryNode node : expectedNodes) { assertThat("missing " + node + "\n" + discoveryNodes.prettyPrint(), discoveryNodes.get(node.id()), equalTo(node)); } + assertThat(discoveryNodes.size(), equalTo(expectedNodes.size())); } static class SimpleFuture extends BaseFuture { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index ee92945c4ff..b7ce4c305e1 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ 
b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -25,12 +25,12 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -275,10 +275,10 @@ public class ZenDiscoveryIT extends ESIntegTestCase { Settings nodeSettings = Settings.settingsBuilder() .put("discovery.type", "zen") // <-- To override the local setting if set externally .build(); - String nodeName = internalCluster().startNode(nodeSettings, Version.V_2_0_0_beta1); + String nodeName = internalCluster().startNode(nodeSettings, Version.V_5_0_0); ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName); ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName); - DiscoveryNode node = new DiscoveryNode("_node_id", new InetSocketTransportAddress(InetAddress.getByName("0.0.0.0"), 0), Version.V_1_6_0); + DiscoveryNode node = new DiscoveryNode("_node_id", new InetSocketTransportAddress(InetAddress.getByName("0.0.0.0"), 0), Version.V_2_0_0); final AtomicReference holder = new AtomicReference<>(); zenDiscovery.handleJoinRequest(node, clusterService.state(), new MembershipAction.JoinCallback() { @Override @@ -292,16 +292,16 @@ }); assertThat(holder.get(), notNullValue()); - assertThat(holder.get().getMessage(), equalTo("Can't handle join request from a node with a version [1.6.0] that is lower than the minimum compatible version [" + Version.V_2_0_0_beta1.minimumCompatibilityVersion() + "]")); + assertThat(holder.get().getMessage(), equalTo("Can't handle join request from a node with a version [2.0.0] that is lower than the minimum compatible version [" + Version.V_5_0_0.minimumCompatibilityVersion() + "]")); } public void testJoinElectedMaster_incompatibleMinVersion() { - ElectMasterService electMasterService = new ElectMasterService(Settings.EMPTY, Version.V_2_0_0_beta1); + ElectMasterService electMasterService = new ElectMasterService(Settings.EMPTY, Version.V_5_0_0); - DiscoveryNode node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), Version.V_2_0_0_beta1); + DiscoveryNode node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), Version.V_5_0_0); assertThat(electMasterService.electMaster(Collections.singletonList(node)), sameInstance(node)); - node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), Version.V_1_6_0); - assertThat("Can't join master because version 1.6.0 is lower than the minimum compatable version 2.0.0 can support", electMasterService.electMaster(Collections.singletonList(node)), nullValue()); + node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), Version.V_2_0_0); + assertThat("Can't join master because version 2.0.0 is lower than the minimum compatible version 5.0.0 can 
support", electMasterService.electMaster(Collections.singletonList(node)), nullValue()); } public void testDiscoveryStats() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java index b247dad069e..88d375699a1 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.PingContextProvider; import org.elasticsearch.discovery.zen.ping.ZenPing; -import org.elasticsearch.node.service.NodeService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -82,11 +81,6 @@ public class UnicastZenPingIT extends ESTestCase { return DiscoveryNodes.builder().put(nodeA).localNodeId("UZP_A").build(); } - @Override - public NodeService nodeService() { - return null; - } - @Override public boolean nodeHasJoinedClusterOnce() { return false; @@ -101,11 +95,6 @@ public class UnicastZenPingIT extends ESTestCase { return DiscoveryNodes.builder().put(nodeB).localNodeId("UZP_B").build(); } - @Override - public NodeService nodeService() { - return null; - } - @Override public boolean nodeHasJoinedClusterOnce() { return true; diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index 224ecbdf619..7e31f6055de 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -43,7 +43,6 @@ import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; import org.elasticsearch.node.Node; -import org.elasticsearch.node.service.NodeService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; @@ -134,11 +133,6 @@ public class PublishClusterStateActionTests extends ESTestCase { return clusterState.nodes(); } - @Override - public NodeService nodeService() { - assert false; - throw new UnsupportedOperationException("Shouldn't be here"); - } } public MockNode createMockNode(final String name) throws Exception { diff --git a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index d255a80fbb8..3c13351a125 100644 --- a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; @@ -36,7 +37,11 @@ import 
org.elasticsearch.test.IndexSettingsModule; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; @@ -129,21 +134,22 @@ public class NodeEnvironmentTests extends ESTestCase { public void testShardLock() throws IOException { final NodeEnvironment env = newNodeEnvironment(); - ShardLock fooLock = env.shardLock(new ShardId("foo", "_na_", 0)); - assertEquals(new ShardId("foo", "_na_", 0), fooLock.getShardId()); + Index index = new Index("foo", "fooUUID"); + ShardLock fooLock = env.shardLock(new ShardId(index, 0)); + assertEquals(new ShardId(index, 0), fooLock.getShardId()); try { - env.shardLock(new ShardId("foo", "_na_", 0)); + env.shardLock(new ShardId(index, 0)); fail("shard is locked"); } catch (LockObtainFailedException ex) { // expected } - for (Path path : env.indexPaths("foo")) { + for (Path path : env.indexPaths(index)) { Files.createDirectories(path.resolve("0")); Files.createDirectories(path.resolve("1")); } try { - env.lockAllForIndex(new Index("foo", "_na_"), idxSettings, randomIntBetween(0, 10)); + env.lockAllForIndex(index, idxSettings, randomIntBetween(0, 10)); fail("shard 0 is locked"); } catch (LockObtainFailedException ex) { // expected @@ -151,11 +157,11 @@ public class NodeEnvironmentTests extends ESTestCase { fooLock.close(); // can lock again? - env.shardLock(new ShardId("foo", "_na_", 0)).close(); + env.shardLock(new ShardId(index, 0)).close(); - List locks = env.lockAllForIndex(new Index("foo", "_na_"), idxSettings, randomIntBetween(0, 10)); + List locks = env.lockAllForIndex(index, idxSettings, randomIntBetween(0, 10)); try { - env.shardLock(new ShardId("foo", "_na_", 0)); + env.shardLock(new ShardId(index, 0)); fail("shard is locked"); } catch (LockObtainFailedException ex) { // expected @@ -165,18 +171,45 @@ public class NodeEnvironmentTests extends ESTestCase { env.close(); } - public void testGetAllIndices() throws Exception { + public void testAvailableIndexFolders() throws Exception { final NodeEnvironment env = newNodeEnvironment(); final int numIndices = randomIntBetween(1, 10); + Set actualPaths = new HashSet<>(); for (int i = 0; i < numIndices; i++) { - for (Path path : env.indexPaths("foo" + i)) { - Files.createDirectories(path); + Index index = new Index("foo" + i, "fooUUID" + i); + for (Path path : env.indexPaths(index)) { + Files.createDirectories(path.resolve(MetaDataStateFormat.STATE_DIR_NAME)); + actualPaths.add(path.getFileName().toString()); } } - Set indices = env.findAllIndices(); - assertEquals(indices.size(), numIndices); + + assertThat(actualPaths, equalTo(env.availableIndexFolders())); + assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty()); + env.close(); + } + + public void testResolveIndexFolders() throws Exception { + final NodeEnvironment env = newNodeEnvironment(); + final int numIndices = randomIntBetween(1, 10); + Map> actualIndexDataPaths = new HashMap<>(); for (int i = 0; i < numIndices; i++) { - assertTrue(indices.contains("foo" + i)); + Index index = new Index("foo" + i, "fooUUID" + i); + Path[] indexPaths = env.indexPaths(index); + for (Path path : indexPaths) { + Files.createDirectories(path); + String fileName = path.getFileName().toString(); + List paths = actualIndexDataPaths.get(fileName); + if (paths == null) { + paths = new 
ArrayList<>(); + } + paths.add(path); + actualIndexDataPaths.put(fileName, paths); + } + } + for (Map.Entry> actualIndexDataPathEntry : actualIndexDataPaths.entrySet()) { + List actual = actualIndexDataPathEntry.getValue(); + Path[] actualPaths = actual.toArray(new Path[actual.size()]); + assertThat(actualPaths, equalTo(env.resolveIndexFolder(actualIndexDataPathEntry.getKey()))); } assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty()); env.close(); @@ -184,44 +217,45 @@ public class NodeEnvironmentTests extends ESTestCase { public void testDeleteSafe() throws IOException, InterruptedException { final NodeEnvironment env = newNodeEnvironment(); - ShardLock fooLock = env.shardLock(new ShardId("foo", "_na_", 0)); - assertEquals(new ShardId("foo", "_na_", 0), fooLock.getShardId()); + final Index index = new Index("foo", "fooUUID"); + ShardLock fooLock = env.shardLock(new ShardId(index, 0)); + assertEquals(new ShardId(index, 0), fooLock.getShardId()); - for (Path path : env.indexPaths("foo")) { + for (Path path : env.indexPaths(index)) { Files.createDirectories(path.resolve("0")); Files.createDirectories(path.resolve("1")); } try { - env.deleteShardDirectorySafe(new ShardId("foo", "_na_", 0), idxSettings); + env.deleteShardDirectorySafe(new ShardId(index, 0), idxSettings); fail("shard is locked"); } catch (LockObtainFailedException ex) { // expected } - for (Path path : env.indexPaths("foo")) { + for (Path path : env.indexPaths(index)) { assertTrue(Files.exists(path.resolve("0"))); assertTrue(Files.exists(path.resolve("1"))); } - env.deleteShardDirectorySafe(new ShardId("foo", "_na_", 1), idxSettings); + env.deleteShardDirectorySafe(new ShardId(index, 1), idxSettings); - for (Path path : env.indexPaths("foo")) { + for (Path path : env.indexPaths(index)) { assertTrue(Files.exists(path.resolve("0"))); assertFalse(Files.exists(path.resolve("1"))); } try { - env.deleteIndexDirectorySafe(new Index("foo", "_na_"), randomIntBetween(0, 10), idxSettings); + env.deleteIndexDirectorySafe(index, randomIntBetween(0, 10), idxSettings); fail("shard is locked"); } catch (LockObtainFailedException ex) { // expected } fooLock.close(); - for (Path path : env.indexPaths("foo")) { + for (Path path : env.indexPaths(index)) { assertTrue(Files.exists(path)); } @@ -242,7 +276,7 @@ public class NodeEnvironmentTests extends ESTestCase { @Override protected void doRun() throws Exception { start.await(); - try (ShardLock autoCloses = env.shardLock(new ShardId("foo", "_na_", 0))) { + try (ShardLock autoCloses = env.shardLock(new ShardId(index, 0))) { blockLatch.countDown(); Thread.sleep(randomIntBetween(1, 10)); } @@ -257,11 +291,11 @@ public class NodeEnvironmentTests extends ESTestCase { start.countDown(); blockLatch.await(); - env.deleteIndexDirectorySafe(new Index("foo", "_na_"), 5000, idxSettings); + env.deleteIndexDirectorySafe(index, 5000, idxSettings); assertNull(threadException.get()); - for (Path path : env.indexPaths("foo")) { + for (Path path : env.indexPaths(index)) { assertFalse(Files.exists(path)); } latch.await(); @@ -300,7 +334,7 @@ public class NodeEnvironmentTests extends ESTestCase { for (int i = 0; i < iters; i++) { int shard = randomIntBetween(0, counts.length - 1); try { - try (ShardLock autoCloses = env.shardLock(new ShardId("foo", "_na_", shard), scaledRandomIntBetween(0, 10))) { + try (ShardLock autoCloses = env.shardLock(new ShardId("foo", "fooUUID", shard), scaledRandomIntBetween(0, 10))) { counts[shard].value++; countsAtomic[shard].incrementAndGet(); 
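// flipFlop detects overlapping holders of the same shard lock: incrementAndGet() must
// observe exactly 1 on acquire (and the release path should symmetrically observe 0).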
assertEquals(flipFlop[shard].incrementAndGet(), 1); @@ -334,37 +368,38 @@ public class NodeEnvironmentTests extends ESTestCase { String[] dataPaths = tmpPaths(); NodeEnvironment env = newNodeEnvironment(dataPaths, "/tmp", Settings.EMPTY); - IndexSettings s1 = IndexSettingsModule.newIndexSettings("myindex", Settings.EMPTY); - IndexSettings s2 = IndexSettingsModule.newIndexSettings("myindex", Settings.builder().put(IndexMetaData.SETTING_DATA_PATH, "/tmp/foo").build()); - Index index = new Index("myindex", "_na_"); + final Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "myindexUUID").build(); + IndexSettings s1 = IndexSettingsModule.newIndexSettings("myindex", indexSettings); + IndexSettings s2 = IndexSettingsModule.newIndexSettings("myindex", Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_DATA_PATH, "/tmp/foo").build()); + Index index = new Index("myindex", "myindexUUID"); ShardId sid = new ShardId(index, 0); assertFalse("no settings should mean no custom data path", s1.hasCustomDataPath()); assertTrue("settings with path_data should have a custom data path", s2.hasCustomDataPath()); assertThat(env.availableShardPaths(sid), equalTo(env.availableShardPaths(sid))); - assertThat(env.resolveCustomLocation(s2, sid), equalTo(PathUtils.get("/tmp/foo/0/myindex/0"))); + assertThat(env.resolveCustomLocation(s2, sid), equalTo(PathUtils.get("/tmp/foo/0/" + index.getUUID() + "/0"))); assertThat("shard paths with a custom data_path should contain only regular paths", env.availableShardPaths(sid), - equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/myindex/0"))); + equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/" + index.getUUID() + "/0"))); assertThat("index paths uses the regular template", - env.indexPaths(index.getName()), equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/myindex"))); + env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/" + index.getUUID()))); env.close(); NodeEnvironment env2 = newNodeEnvironment(dataPaths, "/tmp", Settings.builder().put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), false).build()); assertThat(env2.availableShardPaths(sid), equalTo(env2.availableShardPaths(sid))); - assertThat(env2.resolveCustomLocation(s2, sid), equalTo(PathUtils.get("/tmp/foo/myindex/0"))); + assertThat(env2.resolveCustomLocation(s2, sid), equalTo(PathUtils.get("/tmp/foo/" + index.getUUID() + "/0"))); assertThat("shard paths with a custom data_path should contain only regular paths", env2.availableShardPaths(sid), - equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/myindex/0"))); + equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/" + index.getUUID() + "/0"))); assertThat("index paths uses the regular template", - env2.indexPaths(index.getName()), equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/myindex"))); + env2.indexPaths(index), equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/" + index.getUUID()))); env2.close(); } diff --git a/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java b/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java index 95c52f89933..51536375dca 100644 --- a/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java @@ -29,6 +29,7 @@ import org.hamcrest.Matchers; import java.nio.file.Files; import java.nio.file.Path; +import 
java.nio.file.StandardCopyOption; import java.util.Map; import static org.hamcrest.Matchers.equalTo; @@ -53,6 +54,47 @@ public class DanglingIndicesStateTests extends ESTestCase { assertTrue(danglingState.getDanglingIndices().isEmpty()); } } + public void testDanglingIndicesDiscovery() throws Exception { + try (NodeEnvironment env = newNodeEnvironment()) { + MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env); + DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null); + + assertTrue(danglingState.getDanglingIndices().isEmpty()); + MetaData metaData = MetaData.builder().build(); + final Settings.Builder settings = Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_INDEX_UUID, "test1UUID"); + IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(settings).build(); + metaStateService.writeIndex("test_write", dangledIndex); + Map newDanglingIndices = danglingState.findNewDanglingIndices(metaData); + assertTrue(newDanglingIndices.containsKey(dangledIndex.getIndex())); + metaData = MetaData.builder().put(dangledIndex, false).build(); + newDanglingIndices = danglingState.findNewDanglingIndices(metaData); + assertFalse(newDanglingIndices.containsKey(dangledIndex.getIndex())); + } + } + + public void testInvalidIndexFolder() throws Exception { + try (NodeEnvironment env = newNodeEnvironment()) { + MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env); + DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null); + + MetaData metaData = MetaData.builder().build(); + final String uuid = "test1UUID"; + final Settings.Builder settings = Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_INDEX_UUID, uuid); + IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(settings).build(); + metaStateService.writeIndex("test_write", dangledIndex); + for (Path path : env.resolveIndexFolder(uuid)) { + if (Files.exists(path)) { + Files.move(path, path.resolveSibling("invalidUUID"), StandardCopyOption.ATOMIC_MOVE); + } + } + try { + danglingState.findNewDanglingIndices(metaData); + fail("no exception thrown for invalid folder name"); + } catch (IllegalStateException e) { + assertThat(e.getMessage(), equalTo("[invalidUUID] invalid index folder name, rename to [test1UUID]")); + } + } + } public void testDanglingProcessing() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { @@ -61,15 +103,16 @@ public class DanglingIndicesStateTests extends ESTestCase { MetaData metaData = MetaData.builder().build(); - IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(indexSettings).build(); - metaStateService.writeIndex("test_write", dangledIndex, null); + final Settings.Builder settings = Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_INDEX_UUID, "test1UUID"); + IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(settings).build(); + metaStateService.writeIndex("test_write", dangledIndex); // check that several runs when not in the metadata still keep the dangled index around int numberOfChecks = randomIntBetween(1, 10); for (int i = 0; i < numberOfChecks; i++) { - Map newDanglingIndices = danglingState.findNewDanglingIndices(metaData); + Map newDanglingIndices = danglingState.findNewDanglingIndices(metaData); assertThat(newDanglingIndices.size(), equalTo(1)); - assertThat(newDanglingIndices.keySet(), Matchers.hasItems("test1")); + 
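// findNewDanglingIndices is keyed by Index (name plus UUID) after this change, matching
+ // the on-disk layout where index folders are named by UUID rather than by index name.
+ 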
assertThat(newDanglingIndices.keySet(), Matchers.hasItems(dangledIndex.getIndex())); assertTrue(danglingState.getDanglingIndices().isEmpty()); } @@ -77,7 +120,7 @@ public class DanglingIndicesStateTests extends ESTestCase { danglingState.findNewAndAddDanglingIndices(metaData); assertThat(danglingState.getDanglingIndices().size(), equalTo(1)); - assertThat(danglingState.getDanglingIndices().keySet(), Matchers.hasItems("test1")); + assertThat(danglingState.getDanglingIndices().keySet(), Matchers.hasItems(dangledIndex.getIndex())); } // simulate allocation to the metadata @@ -85,35 +128,15 @@ public class DanglingIndicesStateTests extends ESTestCase { // check that several runs when in the metadata, but not cleaned yet, still keeps dangled for (int i = 0; i < numberOfChecks; i++) { - Map newDanglingIndices = danglingState.findNewDanglingIndices(metaData); + Map newDanglingIndices = danglingState.findNewDanglingIndices(metaData); assertTrue(newDanglingIndices.isEmpty()); assertThat(danglingState.getDanglingIndices().size(), equalTo(1)); - assertThat(danglingState.getDanglingIndices().keySet(), Matchers.hasItems("test1")); + assertThat(danglingState.getDanglingIndices().keySet(), Matchers.hasItems(dangledIndex.getIndex())); } danglingState.cleanupAllocatedDangledIndices(metaData); assertTrue(danglingState.getDanglingIndices().isEmpty()); } } - - public void testRenameOfIndexState() throws Exception { - try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env); - DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null); - - MetaData metaData = MetaData.builder().build(); - - IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(indexSettings).build(); - metaStateService.writeIndex("test_write", dangledIndex, null); - - for (Path path : env.indexPaths("test1")) { - Files.move(path, path.getParent().resolve("test1_renamed")); - } - - Map newDanglingIndices = danglingState.findNewDanglingIndices(metaData); - assertThat(newDanglingIndices.size(), equalTo(1)); - assertThat(newDanglingIndices.keySet(), Matchers.hasItems("test1_renamed")); - } - } } diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index 422aea70134..4be0cf15afc 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.elasticsearch.index.Index; import org.elasticsearch.test.ESAllocationTestCase; import java.util.HashMap; @@ -172,12 +173,12 @@ public class GatewayMetaStateTests extends ESAllocationTestCase { boolean stateInMemory, boolean expectMetaData) throws Exception { MetaData inMemoryMetaData = null; - Set oldIndicesList = emptySet(); + Set oldIndicesList = emptySet(); if (stateInMemory) { inMemoryMetaData = event.previousState().metaData(); oldIndicesList = GatewayMetaState.getRelevantIndices(event.previousState(), event.previousState(), oldIndicesList); } - Set newIndicesList = GatewayMetaState.getRelevantIndices(event.state(),event.previousState(), oldIndicesList); + Set 
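The dangling-indices tests above capture the new contract: discovered candidates are keyed by Index (name plus UUID) rather than by the bare index name, and an on-disk folder whose name does not match the index UUID is rejected outright instead of being imported under its new name, which is why testRenameOfIndexState is gone. A minimal Java sketch of the check testInvalidIndexFolder asserts, using a hypothetical helper name; the real logic lives in DanglingIndicesState:

    // Sketch only: reproduces the error message asserted in testInvalidIndexFolder,
    // not the actual DanglingIndicesState implementation.
    static void validateIndexFolderName(String folderName, String expectedUUID) {
        if (folderName.equals(expectedUUID) == false) {
            throw new IllegalStateException(
                    "[" + folderName + "] invalid index folder name, rename to [" + expectedUUID + "]");
        }
    }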
diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index 422aea70134..4be0cf15afc 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.elasticsearch.index.Index; import org.elasticsearch.test.ESAllocationTestCase; import java.util.HashMap; @@ -172,12 +173,12 @@ public class GatewayMetaStateTests extends ESAllocationTestCase { boolean stateInMemory, boolean expectMetaData) throws Exception { MetaData inMemoryMetaData = null; - Set<String> oldIndicesList = emptySet(); + Set<Index> oldIndicesList = emptySet(); if (stateInMemory) { inMemoryMetaData = event.previousState().metaData(); oldIndicesList = GatewayMetaState.getRelevantIndices(event.previousState(), event.previousState(), oldIndicesList); } - Set<String> newIndicesList = GatewayMetaState.getRelevantIndices(event.state(),event.previousState(), oldIndicesList); + Set<Index> newIndicesList = GatewayMetaState.getRelevantIndices(event.state(),event.previousState(), oldIndicesList); // third, get the actual write info Iterator indices = GatewayMetaState.resolveStatesToBeWritten(oldIndicesList, newIndicesList, inMemoryMetaData, event.state().metaData()).iterator(); diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java index 118f32a6564..bf9921a2e23 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java @@ -19,22 +19,28 @@ package org.elasticsearch.gateway; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.NoopDiscovery; -import org.elasticsearch.test.cluster.NoopClusterService; import org.hamcrest.Matchers; import java.io.IOException; public class GatewayServiceTests extends ESTestCase { + private GatewayService createService(Settings.Builder settings) { + ClusterService clusterService = new ClusterService(Settings.EMPTY, null, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + null, new ClusterName("ClusterServiceTests")); return new GatewayService(Settings.builder() .put("http.enabled", "false") .put("discovery.type", "local") .put(settings.build()).build(), - null, new NoopClusterService(), null, null, null, null, new NoopDiscovery()); + null, clusterService, null, null, null, null, new NoopDiscovery()); } diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index 00c549ef2f1..115e5b68ff0 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -63,7 +62,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.startsWith; @LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to work with ExtrasFS public class MetaDataStateFormatTests extends ESTestCase { @@ -225,94 +223,14 @@ public class MetaDataStateFormatTests extends ESTestCase { msg.append(" after: [").append(checksumAfterCorruption).append("]"); msg.append(" checksum value after corruption: ").append(actualChecksumAfterCorruption).append("]"); msg.append(" file: ").append(fileToCorrupt.getFileName().toString()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString())); - logger.debug(msg.toString()); + logger.debug("{}", msg.toString()); assumeTrue("Checksum collision - " + msg.toString(), checksumAfterCorruption != checksumBeforeCorruption //
collision || actualChecksumAfterCorruption != checksumBeforeCorruption); // checksum corrupted } } - // If the latest version doesn't use the legacy format while previous versions do, then fail hard - public void testLatestVersionDoesNotUseLegacy() throws IOException { - final ToXContent.Params params = ToXContent.EMPTY_PARAMS; - MetaDataStateFormat format = MetaStateService.globalStateFormat(randomFrom(XContentType.values()), params); - final Path[] dirs = new Path[2]; - dirs[0] = createTempDir(); - dirs[1] = createTempDir(); - for (Path dir : dirs) { - Files.createDirectories(dir.resolve(MetaDataStateFormat.STATE_DIR_NAME)); - } - final Path dir1 = randomFrom(dirs); - final int v1 = randomInt(10); - // write a first state file in the new format - format.write(randomMeta(), v1, dir1); - - // write older state files in the old format but with a newer version - final int numLegacyFiles = randomIntBetween(1, 5); - for (int i = 0; i < numLegacyFiles; ++i) { - final Path dir2 = randomFrom(dirs); - final int v2 = v1 + 1 + randomInt(10); - try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(format.format(), Files.newOutputStream(dir2.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve(MetaStateService.GLOBAL_STATE_FILE_PREFIX + v2)))) { - xcontentBuilder.startObject(); - MetaData.Builder.toXContent(randomMeta(), xcontentBuilder, params); - xcontentBuilder.endObject(); - } - } - - try { - format.loadLatestState(logger, dirs); - fail("latest version can not be read"); - } catch (IllegalStateException ex) { - assertThat(ex.getMessage(), startsWith("Could not find a state file to recover from among ")); - } - // write the next state file in the new format and ensure it get's a higher ID - final MetaData meta = randomMeta(); - format.write(meta, v1, dirs); - final MetaData metaData = format.loadLatestState(logger, dirs); - assertEquals(meta.clusterUUID(), metaData.clusterUUID()); - final Path path = randomFrom(dirs); - final Path[] files = FileSystemUtils.files(path.resolve("_state")); - assertEquals(1, files.length); - assertEquals("global-" + format.findMaxStateId("global-", dirs) + ".st", files[0].getFileName().toString()); - - } - - // If both the legacy and the new format are available for the latest version, prefer the new format - public void testPrefersNewerFormat() throws IOException { - final ToXContent.Params params = ToXContent.EMPTY_PARAMS; - MetaDataStateFormat format = MetaStateService.globalStateFormat(randomFrom(XContentType.values()), params); - final Path[] dirs = new Path[2]; - dirs[0] = createTempDir(); - dirs[1] = createTempDir(); - for (Path dir : dirs) { - Files.createDirectories(dir.resolve(MetaDataStateFormat.STATE_DIR_NAME)); - } - final long v = randomInt(10); - - MetaData meta = randomMeta(); - String uuid = meta.clusterUUID(); - - // write a first state file in the old format - final Path dir2 = randomFrom(dirs); - MetaData meta2 = randomMeta(); - assertFalse(meta2.clusterUUID().equals(uuid)); - try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(format.format(), Files.newOutputStream(dir2.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve(MetaStateService.GLOBAL_STATE_FILE_PREFIX + v)))) { - xcontentBuilder.startObject(); - MetaData.Builder.toXContent(randomMeta(), xcontentBuilder, params); - xcontentBuilder.endObject(); - } - - // write a second state file in the new format but with the same version - format.write(meta, v, dirs); - - MetaData state = format.loadLatestState(logger, dirs); - final Path path = randomFrom(dirs); - 
assertTrue(Files.exists(path.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + (v+1) + ".st"))); - assertEquals(state.clusterUUID(), uuid); - } - public void testLoadState() throws IOException { - final ToXContent.Params params = ToXContent.EMPTY_PARAMS; final Path[] dirs = new Path[randomIntBetween(1, 5)]; int numStates = randomIntBetween(1, 5); int numLegacy = randomIntBetween(0, numStates); @@ -321,7 +239,7 @@ public class MetaDataStateFormatTests extends ESTestCase { meta.add(randomMeta()); } Set<Path> corruptedFiles = new HashSet<>(); - MetaDataStateFormat<MetaData> format = MetaStateService.globalStateFormat(randomFrom(XContentType.values()), params); + MetaDataStateFormat<MetaData> format = metaDataFormat(randomFrom(XContentType.values())); for (int i = 0; i < dirs.length; i++) { dirs[i] = createTempDir(); Files.createDirectories(dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME)); @@ -331,9 +249,10 @@ Path file = dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-"+j); Files.createFile(file); // randomly create 0-byte files -- there is extra logic to skip them } else { - try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(type, Files.newOutputStream(dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + j)))) { + try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(type, + Files.newOutputStream(dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + j)))) { xcontentBuilder.startObject(); - MetaData.Builder.toXContent(meta.get(j), xcontentBuilder, params); + MetaData.Builder.toXContent(meta.get(j), xcontentBuilder, ToXContent.EMPTY_PARAMS); xcontentBuilder.endObject(); } } @@ -380,7 +299,20 @@ public class MetaDataStateFormatTests extends ESTestCase { assertThat(ExceptionsHelper.unwrap(ex, CorruptStateException.class), notNullValue()); } } + } + private static MetaDataStateFormat<MetaData> metaDataFormat(XContentType format) { + return new MetaDataStateFormat<MetaData>(format, MetaData.GLOBAL_STATE_FILE_PREFIX) { + @Override + public void toXContent(XContentBuilder builder, MetaData state) throws IOException { + MetaData.Builder.toXContent(state, builder, ToXContent.EMPTY_PARAMS); + } + + @Override + public MetaData fromXContent(XContentParser parser) throws IOException { + return MetaData.Builder.fromXContent(parser); + } + }; } private MetaData randomMeta() throws IOException {
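With MetaStateService.globalStateFormat gone, the test now builds its own typed MetaDataStateFormat<MetaData> through the metaDataFormat helper above. A rough usage sketch, reusing only the write/loadLatestState calls that appear in the surrounding tests (the directory setup and the version value are assumptions):

    // Sketch: round-trip global MetaData through the helper defined above.
    MetaDataStateFormat<MetaData> format = metaDataFormat(randomFrom(XContentType.values()));
    MetaData written = randomMeta();
    format.write(written, 1, dirs);                         // writes global-1.st under each _state dir
    MetaData read = format.loadLatestState(logger, dirs);   // the highest surviving state id wins
    assertEquals(written.clusterUUID(), read.clusterUUID());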
diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java index bada7faa8c8..2f1454b8502 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; @@ -68,14 +67,15 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase { index(index, "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); ensureGreen(); assertIndexInMetaState(node1, index); - assertIndexDirectoryDeleted(node2, index); + Index resolveIndex = resolveIndex(index); + assertIndexDirectoryDeleted(node2, resolveIndex); assertIndexInMetaState(masterNode, index); logger.debug("relocating index..."); client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder().put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node2)).get(); client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).get(); ensureGreen(); - assertIndexDirectoryDeleted(node1, index); + assertIndexDirectoryDeleted(node1, resolveIndex); assertIndexInMetaState(node2, index); assertIndexInMetaState(masterNode, index); } @@ -146,10 +146,10 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase { assertThat(indicesMetaData.get(index).getState(), equalTo(IndexMetaData.State.OPEN)); } - protected void assertIndexDirectoryDeleted(final String nodeName, final String indexName) throws Exception { + protected void assertIndexDirectoryDeleted(final String nodeName, final Index index) throws Exception { assertBusy(() -> { logger.info("checking if index directory exists..."); - assertFalse("Expecting index directory of " + indexName + " to be deleted from node " + nodeName, indexDirectoryExists(nodeName, indexName)); + assertFalse("Expecting index directory of " + index + " to be deleted from node " + nodeName, indexDirectoryExists(nodeName, index)); } ); } @@ -168,9 +168,9 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase { } - private boolean indexDirectoryExists(String nodeName, String indexName) { + private boolean indexDirectoryExists(String nodeName, Index index) { NodeEnvironment nodeEnv = ((InternalTestCluster) cluster()).getInstance(NodeEnvironment.class, nodeName); - for (Path path : nodeEnv.indexPaths(indexName)) { + for (Path path : nodeEnv.indexPaths(index)) { if (Files.exists(path)) { return true; } diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java index 8bcb9c45402..82c38748a48 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.equalTo; @@ -40,24 +41,24 @@ public class MetaStateServiceTests extends ESTestCase { public void testWriteLoadIndex() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(randomSettings(), env); + MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env); IndexMetaData index = IndexMetaData.builder("test1").settings(indexSettings).build(); - metaStateService.writeIndex("test_write", index, null); - assertThat(metaStateService.loadIndexState("test1"), equalTo(index)); + metaStateService.writeIndex("test_write", index); + assertThat(metaStateService.loadIndexState(index.getIndex()), equalTo(index)); } } public void testLoadMissingIndex() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(randomSettings(), env); - assertThat(metaStateService.loadIndexState("test1"),
nullValue()); + MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env); + assertThat(metaStateService.loadIndexState(new Index("test1", "test1UUID")), nullValue()); } } public void testWriteLoadGlobal() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(randomSettings(), env); + MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env); MetaData metaData = MetaData.builder() .persistentSettings(Settings.builder().put("test1", "value1").build()) @@ -69,7 +70,7 @@ public class MetaStateServiceTests extends ESTestCase { public void testWriteGlobalStateWithIndexAndNoIndexIsLoaded() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(randomSettings(), env); + MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env); MetaData metaData = MetaData.builder() .persistentSettings(Settings.builder().put("test1", "value1").build()) @@ -85,7 +86,7 @@ public class MetaStateServiceTests extends ESTestCase { public void testLoadGlobal() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - MetaStateService metaStateService = new MetaStateService(randomSettings(), env); + MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env); IndexMetaData index = IndexMetaData.builder("test1").settings(indexSettings).build(); MetaData metaData = MetaData.builder() @@ -94,7 +95,7 @@ public class MetaStateServiceTests extends ESTestCase { .build(); metaStateService.writeGlobalState("test_write", metaData); - metaStateService.writeIndex("test_write", index, null); + metaStateService.writeIndex("test_write", index); MetaData loadedState = metaStateService.loadFullState(); assertThat(loadedState.persistentSettings(), equalTo(metaData.persistentSettings())); @@ -102,12 +103,4 @@ public class MetaStateServiceTests extends ESTestCase { assertThat(loadedState.index("test1"), equalTo(index)); } } - - private Settings randomSettings() { - Settings.Builder builder = Settings.builder(); - if (randomBoolean()) { - builder.put(MetaStateService.FORMAT_SETTING, randomFrom(XContentType.values()).shortName()); - } - return builder.build(); - } } diff --git a/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java index 6da00d822a2..bb5a6ff748e 100644 --- a/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; @@ -37,7 +38,7 @@ import java.util.Map; public class PriorityComparatorTests extends ESTestCase { public void testPreferNewIndices() { - RoutingNodes.UnassignedShards shards = new RoutingNodes.UnassignedShards((RoutingNodes) null); + RoutingNodes.UnassignedShards shards = new RoutingNodes.UnassignedShards(null); List shardRoutings = Arrays.asList(TestShardRouting.newShardRouting("oldest", 0, null, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), 
TestShardRouting.newShardRouting("newest", 0, null, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); @@ -47,11 +48,11 @@ public class PriorityComparatorTests extends ESTestCase { } shards.sort(new PriorityComparator() { @Override - protected Settings getIndexSettings(String index) { - if ("oldest".equals(index)) { + protected Settings getIndexSettings(Index index) { + if ("oldest".equals(index.getName())) { return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 10) .put(IndexMetaData.SETTING_PRIORITY, 1).build(); - } else if ("newest".equals(index)) { + } else if ("newest".equals(index.getName())) { return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 100) .put(IndexMetaData.SETTING_PRIORITY, 1).build(); } @@ -77,11 +78,11 @@ public class PriorityComparatorTests extends ESTestCase { } shards.sort(new PriorityComparator() { @Override - protected Settings getIndexSettings(String index) { - if ("oldest".equals(index)) { + protected Settings getIndexSettings(Index index) { + if ("oldest".equals(index.getName())) { return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 10) .put(IndexMetaData.SETTING_PRIORITY, 100).build(); - } else if ("newest".equals(index)) { + } else if ("newest".equals(index.getName())) { return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 100) .put(IndexMetaData.SETTING_PRIORITY, 1).build(); } @@ -118,8 +119,8 @@ public class PriorityComparatorTests extends ESTestCase { } shards.sort(new PriorityComparator() { @Override - protected Settings getIndexSettings(String index) { - IndexMeta indexMeta = map.get(index); + protected Settings getIndexSettings(Index index) { + IndexMeta indexMeta = map.get(index.getName()); return indexMeta.settings; } }); diff --git a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java index 399ef9badab..a1d16bfd884 100644 --- a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java @@ -82,7 +82,7 @@ public class QuorumGatewayIT extends ESIntegTestCase { assertTrue(awaitBusy(() -> { logger.info("--> running cluster_health (wait for the shards to startup)"); ClusterHealthResponse clusterHealth = activeClient.admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2").waitForActiveShards(test.numPrimaries * 2)).actionGet(); - logger.info("--> done cluster_health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); return (!clusterHealth.isTimedOut()) && clusterHealth.getStatus() == ClusterHealthStatus.YELLOW; }, 30, TimeUnit.SECONDS)); logger.info("--> one node is closed -- index 1 document into the remaining nodes"); diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 8fd6e303220..4da9c2df177 100644 --- a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -380,7 +380,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { assertSyncIdsNotNull(); } - logger.info("--> disabling allocation while the cluster is shut down", useSyncIds ? 
"" : " a second time"); + logger.info("--> disabling allocation while the cluster is shut down{}", useSyncIds ? "" : " a second time"); // Disable allocations while we are closing nodes client().admin().cluster().prepareUpdateSettings() .setTransientSettings(settingsBuilder() diff --git a/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java index 6f188ef4280..d28e5333225 100644 --- a/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java +++ b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java @@ -100,7 +100,7 @@ public class ReusePeerRecoverySharedTest { assertSyncIdsNotNull(); } - logger.info("--> disabling allocation while the cluster is shut down", useSyncIds ? "" : " a second time"); + logger.info("--> disabling allocation while the cluster is shut down{}", useSyncIds ? "" : " a second time"); // Disable allocations while we are closing nodes client().admin().cluster().prepareUpdateSettings().setTransientSettings( settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)) diff --git a/core/src/test/java/org/elasticsearch/get/GetActionIT.java b/core/src/test/java/org/elasticsearch/get/GetActionIT.java index 68a4df685be..64d293e8bd0 100644 --- a/core/src/test/java/org/elasticsearch/get/GetActionIT.java +++ b/core/src/test/java/org/elasticsearch/get/GetActionIT.java @@ -65,11 +65,6 @@ import static org.hamcrest.Matchers.startsWith; public class GetActionIT extends ESIntegTestCase { - @Override - protected Collection> nodePlugins() { - return pluginList(InternalSettingsPlugin.class); // uses index.version.created - } - public void testSimpleGet() { assertAcked(prepareCreate("test") .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1)) @@ -324,128 +319,6 @@ public class GetActionIT extends ESIntegTestCase { assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2")); } - public void testThatGetFromTranslogShouldWorkWithExcludeBackcompat() throws Exception { - String index = "test"; - String type = "type1"; - - String mapping = jsonBuilder() - .startObject() - .startObject(type) - .startObject("_source") - .array("excludes", "excluded") - .endObject() - .endObject() - .endObject() - .string(); - - assertAcked(prepareCreate(index) - .addMapping(type, mapping) - .setSettings("index.refresh_interval", -1, IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id)); - - client().prepareIndex(index, type, "1") - .setSource(jsonBuilder().startObject().field("field", "1", "2").field("excluded", "should not be seen").endObject()) - .get(); - - GetResponse responseBeforeFlush = client().prepareGet(index, type, "1").get(); - client().admin().indices().prepareFlush(index).get(); - GetResponse responseAfterFlush = client().prepareGet(index, type, "1").get(); - - assertThat(responseBeforeFlush.isExists(), is(true)); - assertThat(responseAfterFlush.isExists(), is(true)); - assertThat(responseBeforeFlush.getSourceAsMap(), hasKey("field")); - assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("excluded"))); - assertThat(responseBeforeFlush.getSourceAsString(), is(responseAfterFlush.getSourceAsString())); - } - - public void testThatGetFromTranslogShouldWorkWithIncludeBackcompat() throws Exception { - String index = "test"; - String type = "type1"; - - String mapping = jsonBuilder() - .startObject() - 
.startObject(type) - .startObject("_source") - .array("includes", "included") - .endObject() - .endObject() - .endObject() - .string(); - - assertAcked(prepareCreate(index) - .addMapping(type, mapping) - .setSettings("index.refresh_interval", -1, IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id)); - - client().prepareIndex(index, type, "1") - .setSource(jsonBuilder().startObject().field("field", "1", "2").field("included", "should be seen").endObject()) - .get(); - - GetResponse responseBeforeFlush = client().prepareGet(index, type, "1").get(); - flush(); - GetResponse responseAfterFlush = client().prepareGet(index, type, "1").get(); - - assertThat(responseBeforeFlush.isExists(), is(true)); - assertThat(responseAfterFlush.isExists(), is(true)); - assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("field"))); - assertThat(responseBeforeFlush.getSourceAsMap(), hasKey("included")); - assertThat(responseBeforeFlush.getSourceAsString(), is(responseAfterFlush.getSourceAsString())); - } - - @SuppressWarnings("unchecked") - public void testThatGetFromTranslogShouldWorkWithIncludeExcludeAndFieldsBackcompat() throws Exception { - String index = "test"; - String type = "type1"; - - String mapping = jsonBuilder() - .startObject() - .startObject(type) - .startObject("_source") - .array("includes", "included") - .array("excludes", "excluded") - .endObject() - .endObject() - .endObject() - .string(); - - assertAcked(prepareCreate(index) - .addMapping(type, mapping) - .setSettings("index.refresh_interval", -1, IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id)); - - client().prepareIndex(index, type, "1") - .setSource(jsonBuilder().startObject() - .field("field", "1", "2") - .startObject("included").field("field", "should be seen").field("field2", "extra field to remove").endObject() - .startObject("excluded").field("field", "should not be seen").field("field2", "should not be seen").endObject() - .endObject()) - .get(); - - GetResponse responseBeforeFlush = client().prepareGet(index, type, "1").setFields("_source", "included.field", "excluded.field").get(); - assertThat(responseBeforeFlush.isExists(), is(true)); - assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("excluded"))); - assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("field"))); - assertThat(responseBeforeFlush.getSourceAsMap(), hasKey("included")); - - // now tests that extra source filtering works as expected - GetResponse responseBeforeFlushWithExtraFilters = client().prepareGet(index, type, "1").setFields("included.field", "excluded.field") - .setFetchSource(new String[]{"field", "*.field"}, new String[]{"*.field2"}).get(); - assertThat(responseBeforeFlushWithExtraFilters.isExists(), is(true)); - assertThat(responseBeforeFlushWithExtraFilters.getSourceAsMap(), not(hasKey("excluded"))); - assertThat(responseBeforeFlushWithExtraFilters.getSourceAsMap(), not(hasKey("field"))); - assertThat(responseBeforeFlushWithExtraFilters.getSourceAsMap(), hasKey("included")); - assertThat((Map) responseBeforeFlushWithExtraFilters.getSourceAsMap().get("included"), hasKey("field")); - assertThat((Map) responseBeforeFlushWithExtraFilters.getSourceAsMap().get("included"), not(hasKey("field2"))); - - flush(); - GetResponse responseAfterFlush = client().prepareGet(index, type, "1").setFields("_source", "included.field", "excluded.field").get(); - GetResponse responseAfterFlushWithExtraFilters = client().prepareGet(index, type, "1").setFields("included.field", "excluded.field") - .setFetchSource("*.field", 
"*.field2").get(); - - assertThat(responseAfterFlush.isExists(), is(true)); - assertThat(responseBeforeFlush.getSourceAsString(), is(responseAfterFlush.getSourceAsString())); - - assertThat(responseAfterFlushWithExtraFilters.isExists(), is(true)); - assertThat(responseBeforeFlushWithExtraFilters.getSourceAsString(), is(responseAfterFlushWithExtraFilters.getSourceAsString())); - } - public void testGetWithVersion() { assertAcked(prepareCreate("test").addAlias(new Alias("alias")) .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1))); @@ -1002,12 +875,11 @@ public class GetActionIT extends ESIntegTestCase { void indexSingleDocumentWithStringFieldsGeneratedFromText(boolean stored, boolean sourceEnabled) { - String storedString = stored ? "yes" : "no"; + String storedString = stored ? "true" : "false"; String createIndexSource = "{\n" + " \"settings\": {\n" + " \"index.translog.flush_threshold_size\": \"1pb\",\n" + - " \"refresh_interval\": \"-1\",\n" + - " \"" + IndexMetaData.SETTING_VERSION_CREATED + "\": " + Version.V_1_4_2.id + "\n" + + " \"refresh_interval\": \"-1\"\n" + " },\n" + " \"mappings\": {\n" + " \"doc\": {\n" + @@ -1054,12 +926,11 @@ public class GetActionIT extends ESIntegTestCase { } void indexSingleDocumentWithNumericFieldsGeneratedFromText(boolean stored, boolean sourceEnabled) { - String storedString = stored ? "yes" : "no"; + String storedString = stored ? "true" : "false"; String createIndexSource = "{\n" + " \"settings\": {\n" + " \"index.translog.flush_threshold_size\": \"1pb\",\n" + - " \"refresh_interval\": \"-1\",\n" + - " \"" + IndexMetaData.SETTING_VERSION_CREATED + "\": " + Version.V_1_4_2.id + "\n" + + " \"refresh_interval\": \"-1\"\n" + " },\n" + " \"mappings\": {\n" + " \"doc\": {\n" + diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTests.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTests.java index 6afe8a0aefc..5d4330ec5c3 100644 --- a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTests.java +++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTests.java @@ -181,7 +181,7 @@ public class NettyHttpServerPipeliningTests extends ESTestCase { @Override public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) { - e.getCause().printStackTrace(); + logger.info("Caught exception", e.getCause()); e.getChannel().close(); } } diff --git a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 9e0c3776bf1..b7c2c29eb07 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.env.Environment; @@ -62,8 +63,8 @@ import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.script.ScriptContextRegistry; import org.elasticsearch.script.ScriptEngineRegistry; import org.elasticsearch.script.ScriptEngineService; -import org.elasticsearch.script.ScriptSettings; import org.elasticsearch.script.ScriptService; +import 
org.elasticsearch.script.ScriptSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.engine.MockEngineFactory; @@ -194,9 +195,9 @@ public class IndexModuleTests extends ESTestCase { public void testListener() throws IOException { - Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.INDEX); + Setting<Boolean> booleanSetting = Setting.boolSetting("index.foo.bar", false, Property.Dynamic, Property.IndexScope); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings(index, settings, booleanSetting), null, new AnalysisRegistry(null, environment)); - Setting<Boolean> booleanSetting2 = Setting.boolSetting("foo.bar.baz", false, true, Setting.Scope.INDEX); + Setting<Boolean> booleanSetting2 = Setting.boolSetting("index.foo.bar.baz", false, Property.Dynamic, Property.IndexScope); AtomicBoolean atomicBoolean = new AtomicBoolean(false); module.addSettingsUpdateConsumer(booleanSetting, atomicBoolean::set); @@ -332,6 +333,20 @@ public class IndexModuleTests extends ESTestCase { indexService.close("simon says", false); } + public void testForceCacheType() throws IOException { + Settings indexSettings = Settings.settingsBuilder() + .put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), "none") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), null, new AnalysisRegistry(null, environment)); + module.forceQueryCacheType("custom"); + module.registerQueryCache("custom", (a, b) -> new CustomQueryCache()); + IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry, + new IndicesFieldDataCache(settings, listener)); + assertTrue(indexService.cache().query() instanceof CustomQueryCache); + indexService.close("simon says", false); + } + class CustomQueryCache implements QueryCache { @Override
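The settings changes in this file and the next follow one migration: the trailing boolean "dynamic" flag plus the Setting.Scope enum become Setting.Property varargs, and index-scoped settings are renamed to carry the "index." prefix (note "foo.bar" becoming "index.foo.bar" above). The pattern side by side; nothing here goes beyond what the diff itself shows:

    // Before: positional dynamic flag and scope enum (removed lines above)
    // Setting<Boolean> s = Setting.boolSetting("foo.bar", false, true, Setting.Scope.INDEX);

    // After: properties as varargs, with the "index." prefix on index-scoped settings
    Setting<Boolean> s = Setting.boolSetting("index.foo.bar", false,
            Property.Dynamic, Property.IndexScope);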
diff --git a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index 677c8358fb0..46d99e3b4bc 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -44,7 +45,8 @@ public class IndexSettingsTests extends ESTestCase { Version version = VersionUtils.getPreviousVersion(); Settings theSettings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).put(IndexMetaData.SETTING_INDEX_UUID, "0xdeadbeef").build(); final AtomicInteger integer = new AtomicInteger(0); - Setting<Integer> integerSetting = Setting.intSetting("index.test.setting.int", -1, true, Setting.Scope.INDEX); + Setting<Integer> integerSetting = Setting.intSetting("index.test.setting.int", -1, + Property.Dynamic, Property.IndexScope); IndexMetaData metaData = newIndexMeta("index", theSettings); IndexSettings settings = newIndexSettings(newIndexMeta("index", theSettings), Settings.EMPTY, integerSetting); settings.getScopedSettings().addSettingsUpdateConsumer(integerSetting, integer::set); @@ -65,8 +67,10 @@ public class IndexSettingsTests extends ESTestCase { .put(IndexMetaData.SETTING_INDEX_UUID, "0xdeadbeef").build(); final AtomicInteger integer = new AtomicInteger(0); final StringBuilder builder = new StringBuilder(); - Setting<Integer> integerSetting = Setting.intSetting("index.test.setting.int", -1, true, Setting.Scope.INDEX); - Setting<String> notUpdated = new Setting<>("index.not.updated", "", Function.identity(), true, Setting.Scope.INDEX); + Setting<Integer> integerSetting = Setting.intSetting("index.test.setting.int", -1, + Property.Dynamic, Property.IndexScope); + Setting<String> notUpdated = new Setting<>("index.not.updated", "", Function.identity(), + Property.Dynamic, Property.IndexScope); IndexSettings settings = newIndexSettings(newIndexMeta("index", theSettings), Settings.EMPTY, integerSetting, notUpdated); settings.getScopedSettings().addSettingsUpdateConsumer(integerSetting, integer::set); @@ -128,7 +132,7 @@ public class IndexSettingsTests extends ESTestCase { Settings nodeSettings = Settings.settingsBuilder().put("index.foo.bar", 43).build(); final AtomicInteger indexValue = new AtomicInteger(0); - Setting<Integer> integerSetting = Setting.intSetting("index.foo.bar", -1, true, Setting.Scope.INDEX); + Setting<Integer> integerSetting = Setting.intSetting("index.foo.bar", -1, Property.Dynamic, Property.IndexScope); IndexSettings settings = newIndexSettings(newIndexMeta("index", theSettings), nodeSettings, integerSetting); settings.getScopedSettings().addSettingsUpdateConsumer(integerSetting, indexValue::set); assertEquals(numReplicas, settings.getNumberOfReplicas()); diff --git a/core/src/test/java/org/elasticsearch/index/IndexTests.java b/core/src/test/java/org/elasticsearch/index/IndexTests.java new file mode 100644 index 00000000000..6ce38c6acba --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/IndexTests.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; + +import static org.apache.lucene.util.TestUtil.randomSimpleString; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; + +public class IndexTests extends ESTestCase { + public void testToString() { + assertEquals("[name/uuid]", new Index("name", "uuid").toString()); + assertEquals("[name]", new Index("name", ClusterState.UNKNOWN_UUID).toString()); + + Index random = new Index(randomSimpleString(random(), 1, 100), + usually() ?
Strings.randomBase64UUID(random()) : ClusterState.UNKNOWN_UUID); + assertThat(random.toString(), containsString(random.getName())); + if (ClusterState.UNKNOWN_UUID.equals(random.getUUID())) { + assertThat(random.toString(), not(containsString(random.getUUID()))); + } else { + assertThat(random.toString(), containsString(random.getUUID())); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java index a7d127a60c8..aa3da8fc840 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java +++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java @@ -156,10 +156,11 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); refresh(); - + Index index = resolveIndex("foo-copy"); for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) { - if (service.hasIndex("foo-copy")) { - IndexShard shard = service.indexServiceSafe("foo-copy").getShardOrNull(0); + + if (service.hasIndex(index)) { + IndexShard shard = service.indexServiceSafe(index).getShardOrNull(0); if (shard.routingEntry().primary()) { assertFalse(shard instanceof ShadowIndexShard); } else { @@ -201,8 +202,9 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(IDX).clear().setTranslog(true).get(); assertEquals(2, indicesStatsResponse.getIndex(IDX).getPrimaries().getTranslog().estimatedNumberOfOperations()); assertEquals(2, indicesStatsResponse.getIndex(IDX).getTotal().getTranslog().estimatedNumberOfOperations()); + Index index = resolveIndex(IDX); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { - IndexService indexService = service.indexService(IDX); + IndexService indexService = service.indexService(index); if (indexService != null) { IndexShard shard = indexService.getShard(0); TranslogStats translogStats = shard.translogStats(); diff --git a/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index 8fd6876b4b2..9e05122322a 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.index; import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.IntField; +import org.apache.lucene.document.LegacyIntField; import org.apache.lucene.document.StringField; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -36,24 +36,30 @@ import java.io.IOException; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; public class IndexingSlowLogTests extends ESTestCase { public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { BytesReference source = JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject().bytes(); - ParsedDocument pd = new ParsedDocument(new StringField("uid", "test:id", Store.YES), new IntField("version", 1, Store.YES), "id", + ParsedDocument pd = new ParsedDocument(new StringField("uid", "test:id", Store.YES), new LegacyIntField("version", 1, Store.YES), "id", "test", null, 0, -1, 
null, source, null); - + Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] - SlowLogParsedDocumentPrinter p = new SlowLogParsedDocumentPrinter(pd, 10, true, 0); + SlowLogParsedDocumentPrinter p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 0); assertThat(p.toString(), not(containsString("source["))); // Turning on document logging logs the whole thing - p = new SlowLogParsedDocumentPrinter(pd, 10, true, Integer.MAX_VALUE); + p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, Integer.MAX_VALUE); assertThat(p.toString(), containsString("source[{\"foo\":\"bar\"}]")); // And you can truncate the source - p = new SlowLogParsedDocumentPrinter(pd, 10, true, 3); + p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 3); assertThat(p.toString(), containsString("source[{\"f]")); + + // And the toString() output is prefixed with the index name and UUID + p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 3); + assertThat(p.toString(), containsString("source[{\"f]")); + assertThat(p.toString(), startsWith("[foo/123] took")); } public void testReformatSetting() {
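The new startsWith("[foo/123] took") assertion leans on Index.toString(), which per IndexTests above renders as [name/uuid], or just [name] when the UUID is ClusterState.UNKNOWN_UUID, so slow-log lines now carry both the index name and its UUID:

    // Sketch of the prefix the slow-log printer is expected to emit.
    Index index = new Index("foo", "123");
    String prefix = index.toString();           // "[foo/123]"
    assert "[foo/123] took".startsWith(prefix);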
diff --git a/core/src/test/java/org/elasticsearch/index/SettingsListenerIT.java b/core/src/test/java/org/elasticsearch/index/SettingsListenerIT.java index 7dbff244fcc..e9e8dcfc007 100644 --- a/core/src/test/java/org/elasticsearch/index/SettingsListenerIT.java +++ b/core/src/test/java/org/elasticsearch/index/SettingsListenerIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.index; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.plugins.Plugin; @@ -43,7 +44,8 @@ public class SettingsListenerIT extends ESIntegTestCase { public static class SettingsListenerPlugin extends Plugin { private final SettingsTestingService service = new SettingsTestingService(); - private static final Setting<Integer> SETTING = Setting.intSetting("index.test.new.setting", 0, true, Setting.Scope.INDEX); + private static final Setting<Integer> SETTING = Setting.intSetting("index.test.new.setting", 0, + Property.Dynamic, Property.IndexScope); /** * The name of the plugin. */ @@ -93,7 +95,8 @@ public class SettingsListenerIT extends ESIntegTestCase { public static class SettingsTestingService { public volatile int value; - public static Setting<Integer> VALUE = Setting.intSetting("index.test.new.setting", -1, -1, true, Setting.Scope.INDEX); + public static Setting<Integer> VALUE = Setting.intSetting("index.test.new.setting", -1, -1, + Property.Dynamic, Property.IndexScope); public void setValue(int value) { this.value = value; diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java index 1eb1e93f09c..6468fae9397 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java @@ -28,6 +28,8 @@ import org.apache.lucene.analysis.fa.PersianNormalizationFilter; import org.apache.lucene.analysis.hunspell.Dictionary; import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter; import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.inject.ModuleTestCase; @@ -106,7 +108,7 @@ public class AnalysisModuleTests extends ModuleTestCase { Settings settings2 = settingsBuilder() .loadFromStream(yaml, getClass().getResourceAsStream(yaml)) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) .build(); AnalysisRegistry newRegistry = getNewRegistry(settings2); AnalysisService analysisService2 = getAnalysisService(newRegistry, settings2); @@ -119,8 +121,8 @@ public class AnalysisModuleTests extends ModuleTestCase { // analysis service has the expected version assertThat(analysisService2.analyzer("standard").analyzer(), is(instanceOf(StandardAnalyzer.class))); - assertEquals(Version.V_0_90_0.luceneVersion, analysisService2.analyzer("standard").analyzer().getVersion()); - assertEquals(Version.V_0_90_0.luceneVersion, analysisService2.analyzer("thai").analyzer().getVersion()); + assertEquals(Version.V_2_0_0.luceneVersion, analysisService2.analyzer("standard").analyzer().getVersion()); + assertEquals(Version.V_2_0_0.luceneVersion, analysisService2.analyzer("thai").analyzer().getVersion()); assertThat(analysisService2.analyzer("custom7").analyzer(), is(instanceOf(StandardAnalyzer.class))); assertEquals(org.apache.lucene.util.Version.fromBits(3,6,0), analysisService2.analyzer("custom7").analyzer().getVersion()); @@ -268,45 +270,6 @@ public class AnalysisModuleTests extends ModuleTestCase { } } - public void testBackwardCompatible() throws IOException { - Settings settings = settingsBuilder() - .put("index.analysis.analyzer.custom1.tokenizer", "standard") - .put("index.analysis.analyzer.custom1.position_offset_gap", "128") - .put("index.analysis.analyzer.custom2.tokenizer", "standard") - .put("index.analysis.analyzer.custom2.position_increment_gap", "256") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, - Version.V_1_7_1)) - .build(); - AnalysisService analysisService = getAnalysisService(settings); - - Analyzer custom1 = analysisService.analyzer("custom1").analyzer(); -
assertThat(custom1, instanceOf(CustomAnalyzer.class)); - assertThat(custom1.getPositionIncrementGap("custom1"), equalTo(128)); - - Analyzer custom2 = analysisService.analyzer("custom2").analyzer(); - assertThat(custom2, instanceOf(CustomAnalyzer.class)); - assertThat(custom2.getPositionIncrementGap("custom2"), equalTo(256)); - } - - public void testWithBothSettings() throws IOException { - Settings settings = settingsBuilder() - .put("index.analysis.analyzer.custom.tokenizer", "standard") - .put("index.analysis.analyzer.custom.position_offset_gap", "128") - .put("index.analysis.analyzer.custom.position_increment_gap", "256") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, - Version.V_1_7_1)) - .build(); - try { - getAnalysisService(settings); - fail("Analyzer has both position_offset_gap and position_increment_gap should fail"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("Custom Analyzer [custom] defined both [position_offset_gap] and [position_increment_gap]" + - ", use only [position_increment_gap]")); - } - } - public void testDeprecatedPositionOffsetGap() throws IOException { Settings settings = settingsBuilder() .put("index.analysis.analyzer.custom.tokenizer", "standard") @@ -328,11 +291,14 @@ public class AnalysisModuleTests extends ModuleTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .build(); - AnalysisModule module = new AnalysisModule(new Environment(settings)); + Environment environment = new Environment(settings); + AnalysisModule module = new AnalysisModule(environment); InputStream aff = getClass().getResourceAsStream("/indices/analyze/conf_dir/hunspell/en_US/en_US.aff"); InputStream dic = getClass().getResourceAsStream("/indices/analyze/conf_dir/hunspell/en_US/en_US.dic"); - Dictionary dictionary = new Dictionary(aff, dic); - module.registerHunspellDictionary("foo", dictionary); - assertInstanceBinding(module, HunspellService.class, (x) -> x.getDictionary("foo") == dictionary); + try (Directory tmp = new SimpleFSDirectory(environment.tmpFile())) { + Dictionary dictionary = new Dictionary(tmp, "hunspell", aff, dic); + module.registerHunspellDictionary("foo", dictionary); + assertInstanceBinding(module, HunspellService.class, (x) -> x.getDictionary("foo") == dictionary); + } } } diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java deleted file mode 100644 index a163d9e42b4..00000000000 --- a/core/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.analysis; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.test.ESTokenStreamTestCase; - -import java.io.IOException; - -import static com.carrotsearch.randomizedtesting.RandomizedTest.scaledRandomIntBetween; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; - -public class AnalyzerBackwardsCompatTests extends ESTokenStreamTestCase { - - private void assertNoStopwordsAfter(org.elasticsearch.Version noStopwordVersion, String type) throws IOException { - final int iters = scaledRandomIntBetween(10, 100); - org.elasticsearch.Version version = org.elasticsearch.Version.CURRENT; - for (int i = 0; i < iters; i++) { - Settings.Builder builder = Settings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop"); - if (version.onOrAfter(noStopwordVersion)) { - if (random().nextBoolean()) { - builder.put(SETTING_VERSION_CREATED, version); - } - } else { - builder.put(SETTING_VERSION_CREATED, version); - } - builder.put("index.analysis.analyzer.foo.type", type); - builder.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()); - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(builder.build()); - NamedAnalyzer analyzer = analysisService.analyzer("foo"); - assertNotNull(analyzer); - if (version.onOrAfter(noStopwordVersion)) { - assertAnalyzesTo(analyzer, "this is bogus", new String[]{"this", "is", "bogus"}); - } else { - assertAnalyzesTo(analyzer, "this is bogus", new String[]{"bogus"}); - } - version = randomVersion(); - } - } - - public void testPatternAnalyzer() throws IOException { - assertNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_RC1, "pattern"); - } - - public void testStandardHTMLStripAnalyzer() throws IOException { - assertNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_RC1, "standard_html_strip"); - } - - public void testStandardAnalyzer() throws IOException { - assertNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_Beta1, "standard"); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java index 9d8efb1de4b..5e1cf2e8179 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java @@ -23,7 +23,6 @@ import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; -import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenFilter; import org.apache.lucene.analysis.reverse.ReverseStringFilter; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -120,45 +119,20 @@ public class NGramTokenizerFactoryTests extends ESTokenStreamTestCase { final Index index = new Index("test", "_na_"); final String name = "ngr"; Version v = randomVersion(random()); - if (v.onOrAfter(Version.V_0_90_2)) { - Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3); - boolean compatVersion = false; - if ((compatVersion = random().nextBoolean())) { - builder.put("version", "4." 
+ random().nextInt(3)); - } - boolean reverse = random().nextBoolean(); - if (reverse) { - builder.put("side", "back"); - } - Settings settings = builder.build(); - Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build(); - Tokenizer tokenizer = new MockTokenizer(); - tokenizer.setReader(new StringReader("foo bar")); - TokenStream edgeNGramTokenFilter = new EdgeNGramTokenFilterFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create(tokenizer); - if (reverse) { - assertThat(edgeNGramTokenFilter, instanceOf(ReverseStringFilter.class)); - } else if (compatVersion) { - assertThat(edgeNGramTokenFilter, instanceOf(Lucene43EdgeNGramTokenFilter.class)); - } else { - assertThat(edgeNGramTokenFilter, instanceOf(EdgeNGramTokenFilter.class)); - } - + Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3); + boolean reverse = random().nextBoolean(); + if (reverse) { + builder.put("side", "back"); + } + Settings settings = builder.build(); + Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build(); + Tokenizer tokenizer = new MockTokenizer(); + tokenizer.setReader(new StringReader("foo bar")); + TokenStream edgeNGramTokenFilter = new EdgeNGramTokenFilterFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create(tokenizer); + if (reverse) { + assertThat(edgeNGramTokenFilter, instanceOf(ReverseStringFilter.class)); } else { - Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3); - boolean reverse = random().nextBoolean(); - if (reverse) { - builder.put("side", "back"); - } - Settings settings = builder.build(); - Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build(); - Tokenizer tokenizer = new MockTokenizer(); - tokenizer.setReader(new StringReader("foo bar")); - TokenStream edgeNGramTokenFilter = new EdgeNGramTokenFilterFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create(tokenizer); - if (reverse) { - assertThat(edgeNGramTokenFilter, instanceOf(ReverseStringFilter.class)); - } else { - assertThat(edgeNGramTokenFilter, instanceOf(Lucene43EdgeNGramTokenFilter.class)); - } + assertThat(edgeNGramTokenFilter, instanceOf(EdgeNGramTokenFilter.class)); } } } diff --git a/core/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java b/core/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java index 89940558d51..10d3d3554dd 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java @@ -19,8 +19,8 @@ package org.elasticsearch.index.analysis; -import org.apache.lucene.analysis.NumericTokenStream; -import org.apache.lucene.analysis.NumericTokenStream.NumericTermAttribute; +import org.apache.lucene.analysis.LegacyNumericTokenStream; +import org.apache.lucene.analysis.LegacyNumericTokenStream.LegacyNumericTermAttribute; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.elasticsearch.test.ESTestCase; @@ -37,10 +37,10 @@ public class NumericAnalyzerTests extends ESTestCase { NumericDoubleAnalyzer analyzer = new NumericDoubleAnalyzer(precisionStep); final TokenStream ts1 = analyzer.tokenStream("dummy", String.valueOf(value)); - final 
NumericTokenStream ts2 = new NumericTokenStream(precisionStep); + final LegacyNumericTokenStream ts2 = new LegacyNumericTokenStream(precisionStep); ts2.setDoubleValue(value); - final NumericTermAttribute numTerm1 = ts1.addAttribute(NumericTermAttribute.class); - final NumericTermAttribute numTerm2 = ts1.addAttribute(NumericTermAttribute.class); + final LegacyNumericTermAttribute numTerm1 = ts1.addAttribute(LegacyNumericTermAttribute.class); + final LegacyNumericTermAttribute numTerm2 = ts2.addAttribute(LegacyNumericTermAttribute.class); final PositionIncrementAttribute posInc1 = ts1.addAttribute(PositionIncrementAttribute.class); final PositionIncrementAttribute posInc2 = ts2.addAttribute(PositionIncrementAttribute.class); ts1.reset(); diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java deleted file mode 100644 index 2cb8f99e7b8..00000000000 --- a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.analysis; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.analysis.PreBuiltAnalyzers; -import org.elasticsearch.test.ESTestCase; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; - -/** - * - */ -public class PreBuiltAnalyzerProviderFactoryTests extends ESTestCase { - public void testVersioningInFactoryProvider() throws Exception { - PreBuiltAnalyzerProviderFactory factory = new PreBuiltAnalyzerProviderFactory("default", AnalyzerScope.INDEX, PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT)); - - AnalyzerProvider former090AnalyzerProvider = factory.create("default", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build()); - AnalyzerProvider currentAnalyzerProviderReference = factory.create("default", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()); - - // would love to access the version inside of the lucene analyzer, but that is not possible...
- assertThat(currentAnalyzerProviderReference, is(not(former090AnalyzerProvider))); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index fbb69ea1eb0..06a242c8277 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -59,20 +59,18 @@ public class PreBuiltAnalyzerTests extends ESSingleNodeTestCase { public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() { assertThat(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT), - is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_0_18_0))); + is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_2_0_0))); } public void testThatInstancesAreCachedAndReused() { - assertThat(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT), - is(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT))); - assertThat(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_0_18_0), - is(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_0_18_0))); - } + assertSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT), + PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT)); + // same lucene version should be cached + assertSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_2_0_0), + PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_2_0_1)); - public void testThatInstancesWithSameLuceneVersionAreReused() { - // both are lucene 4.4 and should return the same instance - assertThat(PreBuiltAnalyzers.CATALAN.getAnalyzer(Version.V_0_90_4), - is(PreBuiltAnalyzers.CATALAN.getAnalyzer(Version.V_0_90_5))); + assertNotSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_2_0_0), + PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_2_2_0)); } public void testThatAnalyzersAreUsedInMapping() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java deleted file mode 100644 index 39de728a484..00000000000 --- a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.index.analysis; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.analysis.PreBuiltCharFilters; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; - -import static org.hamcrest.CoreMatchers.is; - -/** - * - */ -public class PreBuiltCharFilterFactoryFactoryTests extends ESTestCase { - public void testThatDifferentVersionsCanBeLoaded() throws IOException { - PreBuiltCharFilterFactoryFactory factory = new PreBuiltCharFilterFactoryFactory(PreBuiltCharFilters.HTML_STRIP.getCharFilterFactory(Version.CURRENT)); - - CharFilterFactory former090TokenizerFactory = factory.get(null, null, "html_strip", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build()); - CharFilterFactory former090TokenizerFactoryCopy = factory.get(null, null, "html_strip", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build()); - CharFilterFactory currentTokenizerFactory = factory.get(null, null, "html_strip", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()); - - assertThat(currentTokenizerFactory, is(former090TokenizerFactory)); - assertThat(currentTokenizerFactory, is(former090TokenizerFactoryCopy)); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java deleted file mode 100644 index 670df069926..00000000000 --- a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.index.analysis; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.analysis.PreBuiltTokenFilters; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.not; - -/** - * - */ -public class PreBuiltTokenFilterFactoryFactoryTests extends ESTestCase { - public void testThatCachingWorksForCachingStrategyOne() throws IOException { - PreBuiltTokenFilterFactoryFactory factory = new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.WORD_DELIMITER.getTokenFilterFactory(Version.CURRENT)); - - TokenFilterFactory former090TokenizerFactory = factory.get(null, null, "word_delimiter", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1).build()); - TokenFilterFactory former090TokenizerFactoryCopy = factory.get(null, null, "word_delimiter", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2).build()); - TokenFilterFactory currentTokenizerFactory = factory.get(null, null, "word_delimiter", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()); - - assertThat(currentTokenizerFactory, is(former090TokenizerFactory)); - assertThat(currentTokenizerFactory, is(former090TokenizerFactoryCopy)); - } - - public void testThatDifferentVersionsCanBeLoaded() throws IOException { - PreBuiltTokenFilterFactoryFactory factory = new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.STOP.getTokenFilterFactory(Version.CURRENT)); - - TokenFilterFactory former090TokenizerFactory = factory.get(null, null, "stop", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1).build()); - TokenFilterFactory former090TokenizerFactoryCopy = factory.get(null, null, "stop", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2).build()); - TokenFilterFactory currentTokenizerFactory = factory.get(null, null, "stop", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()); - - assertThat(currentTokenizerFactory, is(not(former090TokenizerFactory))); - assertThat(former090TokenizerFactory, is(former090TokenizerFactoryCopy)); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java deleted file mode 100644 index 162dbb36424..00000000000 --- a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.analysis; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.analysis.PreBuiltTokenizers; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; - -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.not; - -/** - * - */ -public class PreBuiltTokenizerFactoryFactoryTests extends ESTestCase { - public void testThatDifferentVersionsCanBeLoaded() throws IOException { - PreBuiltTokenizerFactoryFactory factory = new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.STANDARD.getTokenizerFactory(Version.CURRENT)); - - // different es versions, same lucene version, thus cached - TokenizerFactory former090TokenizerFactory = factory.get(null, null, "standard", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1).build()); - TokenizerFactory former090TokenizerFactoryCopy = factory.get(null, null, "standard", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2).build()); - TokenizerFactory currentTokenizerFactory = factory.get(null, null, "standard", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()); - - assertThat(currentTokenizerFactory, is(not(former090TokenizerFactory))); - assertThat(currentTokenizerFactory, is(not(former090TokenizerFactoryCopy))); - assertThat(former090TokenizerFactory, is(former090TokenizerFactoryCopy)); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java index 37844dce69d..f0a6077b497 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java @@ -40,10 +40,9 @@ import static org.hamcrest.Matchers.instanceOf; * */ public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase { - public void testEnglishBackwardsCompatibility() throws IOException { + public void testEnglishFilterFactory() throws IOException { int iters = scaledRandomIntBetween(20, 100); for (int i = 0; i < iters; i++) { - Version v = VersionUtils.randomVersion(random()); Settings settings = Settings.settingsBuilder() .put("index.analysis.filter.my_english.type", "stemmer") @@ -61,19 +60,13 @@ public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase { tokenizer.setReader(new StringReader("foo bar")); TokenStream create = tokenFilter.create(tokenizer); NamedAnalyzer analyzer = analysisService.analyzer("my_english"); - - if (v.onOrAfter(Version.V_1_3_0)) { - assertThat(create, instanceOf(PorterStemFilter.class)); - assertAnalyzesTo(analyzer, "consolingly", new String[]{"consolingli"}); - } else { - assertThat(create, instanceOf(SnowballFilter.class)); - assertAnalyzesTo(analyzer, "consolingly", new String[]{"consol"}); - } + assertThat(create, instanceOf(PorterStemFilter.class)); + assertAnalyzesTo(analyzer, "consolingly", new String[]{"consolingli"}); } } - public void testPorter2BackwardsCompatibility() throws IOException { + public void testPorter2FilterFactory() throws IOException { int iters = scaledRandomIntBetween(20, 100); for (int i = 0; i < iters; i++) { @@ -95,12 
+88,7 @@ public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase { TokenStream create = tokenFilter.create(tokenizer); NamedAnalyzer analyzer = analysisService.analyzer("my_porter2"); assertThat(create, instanceOf(SnowballFilter.class)); - - if (v.onOrAfter(Version.V_1_3_0)) { - assertAnalyzesTo(analyzer, "possibly", new String[]{"possibl"}); - } else { - assertAnalyzesTo(analyzer, "possibly", new String[]{"possibli"}); - } + assertAnalyzesTo(analyzer, "possibly", new String[]{"possibl"}); } } diff --git a/core/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java b/core/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java index 2804f522afa..d319ab44319 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.core.Lucene43StopFilter; import org.apache.lucene.analysis.core.StopFilter; import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter; @@ -57,14 +56,8 @@ public class StopTokenFilterTests extends ESTokenStreamTestCase { public void testCorrectPositionIncrementSetting() throws IOException { Builder builder = Settings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop"); - int thingToDo = random().nextInt(3); - if (thingToDo == 0) { + if (random().nextBoolean()) { builder.put("index.analysis.filter.my_stop.version", Version.LATEST); - } else if (thingToDo == 1) { - builder.put("index.analysis.filter.my_stop.version", Version.LUCENE_4_0); - if (random().nextBoolean()) { - builder.put("index.analysis.filter.my_stop.enable_position_increments", true); - } } else { // don't specify } @@ -75,27 +68,7 @@ public class StopTokenFilterTests extends ESTokenStreamTestCase { Tokenizer tokenizer = new WhitespaceTokenizer(); tokenizer.setReader(new StringReader("foo bar")); TokenStream create = tokenFilter.create(tokenizer); - if (thingToDo == 1) { - assertThat(create, instanceOf(Lucene43StopFilter.class)); - } else { - assertThat(create, instanceOf(StopFilter.class)); - } - } - - public void testDeprecatedPositionIncrementSettingWithVersions() throws IOException { - Settings settings = Settings.settingsBuilder() - .put("index.analysis.filter.my_stop.type", "stop") - .put("index.analysis.filter.my_stop.enable_position_increments", false) - .put("index.analysis.filter.my_stop.version", "4.3") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(); - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); - TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop"); - assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class)); - Tokenizer tokenizer = new WhitespaceTokenizer(); - tokenizer.setReader(new StringReader("foo bar")); - TokenStream create = tokenFilter.create(tokenizer); - assertThat(create, instanceOf(Lucene43StopFilter.class)); + assertThat(create, instanceOf(StopFilter.class)); } public void testThatSuggestStopFilterWorks() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java index 
a041694dde6..c23875f8a9a 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java @@ -146,23 +146,4 @@ public class WordDelimiterTokenFilterFactoryTests extends ESTokenStreamTestCase tokenizer.setReader(new StringReader(source)); assertTokenStreamContents(tokenFilter.create(tokenizer), expected); } - - /** Back compat: - * old offset order when doing both parts and concatenation: PowerShot is a synonym of Shot */ - public void testDeprecatedPartsAndCatenate() throws IOException { - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter") - .put("index.analysis.filter.my_word_delimiter.catenate_words", "true") - .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true") - .put("index.analysis.filter.my_word_delimiter.version", "4.7") - .build()); - TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter"); - String source = "PowerShot"; - String[] expected = new String[]{"Power", "Shot", "PowerShot" }; - Tokenizer tokenizer = new WhitespaceTokenizer(); - tokenizer.setReader(new StringReader(source)); - assertTokenStreamContents(tokenFilter.create(tokenizer), expected); - } - } diff --git a/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java b/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java index 18714fe61ef..e82ed61fbed 100644 --- a/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java +++ b/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java @@ -87,7 +87,7 @@ public class BitSetFilterCacheTests extends ESTestCase { writer.addDocument(document); writer.commit(); - DirectoryReader reader = DirectoryReader.open(writer, false); + DirectoryReader reader = DirectoryReader.open(writer); reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test", "_na_", 0)); IndexSearcher searcher = new IndexSearcher(reader); @@ -112,7 +112,7 @@ public class BitSetFilterCacheTests extends ESTestCase { writer.forceMerge(1); reader.close(); - reader = DirectoryReader.open(writer, false); + reader = DirectoryReader.open(writer); reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test", "_na_", 0)); searcher = new IndexSearcher(reader); @@ -138,7 +138,7 @@ public class BitSetFilterCacheTests extends ESTestCase { document.add(new StringField("field", "value", Field.Store.NO)); writer.addDocument(document); writer.commit(); - final DirectoryReader writerReader = DirectoryReader.open(writer, false); + final DirectoryReader writerReader = DirectoryReader.open(writer); final IndexReader reader = ElasticsearchDirectoryReader.wrap(writerReader, new ShardId("test", "_na_", 0)); final AtomicLong stats = new AtomicLong(); @@ -211,7 +211,7 @@ public class BitSetFilterCacheTests extends ESTestCase { newIndexWriterConfig() ); writer.addDocument(new Document()); - DirectoryReader reader = DirectoryReader.open(writer, true); + DirectoryReader reader = DirectoryReader.open(writer); writer.close(); reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test2", "_na_", 0)); diff --git a/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java 
b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java index 3d912d41c38..4fb31bb4ea9 100644 --- a/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java +++ b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java @@ -20,18 +20,12 @@ package org.elasticsearch.index.codec; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene40.Lucene40Codec; -import org.apache.lucene.codecs.lucene41.Lucene41Codec; -import org.apache.lucene.codecs.lucene410.Lucene410Codec; -import org.apache.lucene.codecs.lucene42.Lucene42Codec; -import org.apache.lucene.codecs.lucene45.Lucene45Codec; -import org.apache.lucene.codecs.lucene46.Lucene46Codec; -import org.apache.lucene.codecs.lucene49.Lucene49Codec; import org.apache.lucene.codecs.lucene50.Lucene50Codec; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; import org.apache.lucene.codecs.lucene53.Lucene53Codec; import org.apache.lucene.codecs.lucene54.Lucene54Codec; +import org.apache.lucene.codecs.lucene60.Lucene60Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -64,16 +58,10 @@ public class CodecTests extends ESTestCase { public void testResolveDefaultCodecs() throws Exception { CodecService codecService = createCodecService(); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); - assertThat(codecService.codec("default"), instanceOf(Lucene54Codec.class)); + assertThat(codecService.codec("default"), instanceOf(Lucene60Codec.class)); + assertThat(codecService.codec("Lucene54"), instanceOf(Lucene54Codec.class)); assertThat(codecService.codec("Lucene53"), instanceOf(Lucene53Codec.class)); assertThat(codecService.codec("Lucene50"), instanceOf(Lucene50Codec.class)); - assertThat(codecService.codec("Lucene410"), instanceOf(Lucene410Codec.class)); - assertThat(codecService.codec("Lucene49"), instanceOf(Lucene49Codec.class)); - assertThat(codecService.codec("Lucene46"), instanceOf(Lucene46Codec.class)); - assertThat(codecService.codec("Lucene45"), instanceOf(Lucene45Codec.class)); - assertThat(codecService.codec("Lucene40"), instanceOf(Lucene40Codec.class)); - assertThat(codecService.codec("Lucene41"), instanceOf(Lucene41Codec.class)); - assertThat(codecService.codec("Lucene42"), instanceOf(Lucene42Codec.class)); } public void testDefault() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/engine/CommitStatsTests.java b/core/src/test/java/org/elasticsearch/index/engine/CommitStatsTests.java deleted file mode 100644 index 8d9c313a9a2..00000000000 --- a/core/src/test/java/org/elasticsearch/index/engine/CommitStatsTests.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - - -package org.elasticsearch.index.engine; - -import org.apache.lucene.index.SegmentInfos; -import org.elasticsearch.common.io.stream.InputStreamStreamInput; -import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; -import org.elasticsearch.test.ESTestCase; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; - -import static org.elasticsearch.test.VersionUtils.randomVersion; - - -public class CommitStatsTests extends ESTestCase { - public void testStreamingWithNullId() throws IOException { - SegmentInfos segmentInfos = new SegmentInfos(); - CommitStats commitStats = new CommitStats(segmentInfos); - org.elasticsearch.Version targetNodeVersion = randomVersion(random()); - - ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); - OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setVersion(targetNodeVersion); - commitStats.writeTo(out); - - ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); - InputStreamStreamInput in = new InputStreamStreamInput(inBuffer); - in.setVersion(targetNodeVersion); - CommitStats readCommitStats = CommitStats.readCommitStatsFrom(in); - assertNull(readCommitStats.getId()); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java index a844f971eac..cf56f41c83a 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.hamcrest.Matchers; import java.io.IOException; @@ -38,8 +39,10 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFa @ClusterScope(numDataNodes = 1, scope = Scope.SUITE) public class InternalEngineMergeIT extends ESIntegTestCase { + + @TestLogging("_root:DEBUG") public void testMergesHappening() throws InterruptedException, IOException, ExecutionException { - final int numOfShards = randomIntBetween(1,5); + final int numOfShards = randomIntBetween(1, 5); // some settings to keep num segments low assertAcked(prepareCreate("test").setSettings(Settings.builder() .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numOfShards) diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index e9971a15f8e..ab2041baa4a 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -485,7 +485,7 @@ public class InternalEngineTests extends ESTestCase { if (flush) { // we should have had just 1 merge, so last generation should be exact - assertEquals(gen2 + 1, store.readLastCommittedSegmentsInfo().getLastGeneration()); + assertEquals(gen2, store.readLastCommittedSegmentsInfo().getLastGeneration()); } } } @@ -843,7 +843,7 @@ public class InternalEngineTests extends ESTestCase { Engine.SyncedFlushResult.SUCCESS); assertEquals(3, engine.segments(false).size()); - 
engine.forceMerge(false, 1, false, false, false); + engine.forceMerge(forceMergeFlushes, 1, false, false, false); if (forceMergeFlushes == false) { engine.refresh("make all segments visible"); assertEquals(4, engine.segments(false).size()); @@ -867,7 +867,7 @@ public class InternalEngineTests extends ESTestCase { assertEquals(engine.getLastWriteNanos(), delete.startTime()); } assertFalse(engine.tryRenewSyncCommit()); - engine.flush(); + engine.flush(false, true); // we might hit a concurrent flush from a finishing merge here - just wait if ongoing... assertNull(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID)); assertNull(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID)); } diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java index 37e530cc7f4..b6ae9948675 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java @@ -170,7 +170,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes assertValues(bytesValues, 1, one()); assertValues(bytesValues, 2, three()); - IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true)); + IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null)))); assertThat(topDocs.totalHits, equalTo(3)); assertThat(topDocs.scoreDocs.length, equalTo(3)); @@ -226,7 +226,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes fillExtendedMvSet(); IndexFieldData indexFieldData = getForField("value"); - IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true)); + IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null)))); assertThat(topDocs.totalHits, equalTo(8)); diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java index 5c229545755..66487c54bf2 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java @@ -150,7 +150,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase { if (readerContext != null) { readerContext.reader().close(); } - topLevelReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "_na_", 1)); + topLevelReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); LeafReader reader = SlowCompositeReaderWrapper.wrap(topLevelReader); readerContext = reader.getContext(); return readerContext; @@ -168,7 +168,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase { protected Nested createNested(IndexSearcher searcher, Query parentFilter, Query childFilter) throws IOException { BitsetFilterCache s = indexService.cache().bitsetFilterCache(); - return new 
Nested(s.getBitSetProducer(parentFilter), searcher.createNormalizedWeight(childFilter, false)); + return new Nested(s.getBitSetProducer(parentFilter), childFilter); } public void testEmpty() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java index 31a17a684ee..15e4790ca9d 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java @@ -265,7 +265,7 @@ public abstract class AbstractStringFieldDataTestCase extends AbstractFieldDataI final IndexFieldData indexFieldData = getForField("value"); final String missingValue = values[1]; - IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true)); + IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); XFieldComparatorSource comparator = indexFieldData.comparatorSource(missingValue, MultiValueMode.MIN, null); TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(new SortField("value", comparator, reverse))); assertEquals(numDocs, topDocs.totalHits); @@ -319,7 +319,7 @@ public abstract class AbstractStringFieldDataTestCase extends AbstractFieldDataI } } final IndexFieldData indexFieldData = getForField("value"); - IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true)); + IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); XFieldComparatorSource comparator = indexFieldData.comparatorSource(first ? "_first" : "_last", MultiValueMode.MIN, null); TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? 
numDocs : randomIntBetween(10, numDocs), new Sort(new SortField("value", comparator, reverse))); assertEquals(numDocs, topDocs.totalHits); @@ -387,7 +387,7 @@ public abstract class AbstractStringFieldDataTestCase extends AbstractFieldDataI writer.commit(); } } - DirectoryReader directoryReader = DirectoryReader.open(writer, true); + DirectoryReader directoryReader = DirectoryReader.open(writer); directoryReader = ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexService.index(), 0)); IndexSearcher searcher = new IndexSearcher(directoryReader); IndexFieldData fieldData = getForField("text"); diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java index 26ea97dbf15..7ad8653260e 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java @@ -125,7 +125,7 @@ public class DuelFieldDataTests extends AbstractFieldDataTestCase { duelFieldDataBytes(random, context, leftFieldData, rightFieldData, pre); duelFieldDataBytes(random, context, rightFieldData, leftFieldData, pre); - DirectoryReader perSegment = DirectoryReader.open(writer, true); + DirectoryReader perSegment = DirectoryReader.open(writer); CompositeReaderContext composite = perSegment.getContext(); List leaves = composite.leaves(); for (LeafReaderContext atomicReaderContext : leaves) { @@ -203,7 +203,7 @@ public class DuelFieldDataTests extends AbstractFieldDataTestCase { duelFieldDataLong(random, context, leftFieldData, rightFieldData); duelFieldDataLong(random, context, rightFieldData, leftFieldData); - DirectoryReader perSegment = DirectoryReader.open(writer, true); + DirectoryReader perSegment = DirectoryReader.open(writer); CompositeReaderContext composite = perSegment.getContext(); List leaves = composite.leaves(); for (LeafReaderContext atomicReaderContext : leaves) { @@ -283,7 +283,7 @@ public class DuelFieldDataTests extends AbstractFieldDataTestCase { duelFieldDataDouble(random, context, leftFieldData, rightFieldData); duelFieldDataDouble(random, context, rightFieldData, leftFieldData); - DirectoryReader perSegment = DirectoryReader.open(writer, true); + DirectoryReader perSegment = DirectoryReader.open(writer); CompositeReaderContext composite = perSegment.getContext(); List leaves = composite.leaves(); for (LeafReaderContext atomicReaderContext : leaves) { @@ -341,7 +341,7 @@ public class DuelFieldDataTests extends AbstractFieldDataTestCase { duelFieldDataBytes(random, context, leftFieldData, rightFieldData, pre); duelFieldDataBytes(random, context, rightFieldData, leftFieldData, pre); - DirectoryReader perSegment = DirectoryReader.open(writer, true); + DirectoryReader perSegment = DirectoryReader.open(writer); CompositeReaderContext composite = perSegment.getContext(); List leaves = composite.leaves(); for (LeafReaderContext atomicReaderContext : leaves) { @@ -449,7 +449,7 @@ public class DuelFieldDataTests extends AbstractFieldDataTestCase { duelFieldDataGeoPoint(random, context, leftFieldData, rightFieldData, precision); duelFieldDataGeoPoint(random, context, rightFieldData, leftFieldData, precision); - DirectoryReader perSegment = DirectoryReader.open(writer, true); + DirectoryReader perSegment = DirectoryReader.open(writer); CompositeReaderContext composite = perSegment.getContext(); List leaves = composite.leaves(); for (LeafReaderContext atomicReaderContext : leaves) { diff --git 
a/core/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java index 49cb414208d..3d2b77246a8 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java @@ -145,7 +145,7 @@ public class FilterFieldDataTests extends AbstractFieldDataTestCase { } writer.addDocument(d); } - logger.debug(hundred + " " + ten + " " + five); + logger.debug("{} {} {}", hundred, ten, five); writer.forceMerge(1, true); LeafReaderContext context = refreshReader(); String[] formats = new String[] { "paged_bytes"}; diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java index 101e7368353..2d204d1003a 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java @@ -114,7 +114,7 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase { Document doc = new Document(); doc.add(new StringField("s", "thisisastring", Store.NO)); writer.addDocument(doc); - DirectoryReader open = DirectoryReader.open(writer, true); + DirectoryReader open = DirectoryReader.open(writer); final boolean wrap = randomBoolean(); final IndexReader reader = wrap ? ElasticsearchDirectoryReader.wrap(open, new ShardId("test", "_na_", 1)) : open; final AtomicInteger onCacheCalled = new AtomicInteger(); diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java index 1e0d8ecdf00..9e1b5d9d167 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java @@ -165,7 +165,7 @@ public class ParentChildFieldDataTests extends AbstractFieldDataTestCase { public void testSorting() throws Exception { IndexFieldData indexFieldData = getForField(parentType); - IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true)); + IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); IndexFieldData.XFieldComparatorSource comparator = indexFieldData.comparatorSource("_last", MultiValueMode.MIN, null); TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField(ParentFieldMapper.joinField(parentType), comparator, false))); @@ -211,7 +211,7 @@ public class ParentChildFieldDataTests extends AbstractFieldDataTestCase { public void testThreads() throws Exception { final ParentChildIndexFieldData indexFieldData = getForField(childType); - final DirectoryReader reader = DirectoryReader.open(writer, true); + final DirectoryReader reader = DirectoryReader.open(writer); final IndexParentChildFieldData global = indexFieldData.loadGlobal(reader); final AtomicReference error = new AtomicReference<>(); final int numThreads = scaledRandomIntBetween(3, 8); diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/fieldcomparator/ReplaceMissingTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/fieldcomparator/ReplaceMissingTests.java index 63b66f47d1a..a291311c3bc 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/fieldcomparator/ReplaceMissingTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/fielddata/fieldcomparator/ReplaceMissingTests.java @@ -52,7 +52,7 @@ public class ReplaceMissingTests extends ESTestCase { iw.close(); DirectoryReader reader = DirectoryReader.open(dir); - LeafReader ar = getOnlySegmentReader(reader); + LeafReader ar = getOnlyLeafReader(reader); SortedDocValues raw = ar.getSortedDocValues("field"); assertEquals(2, raw.getValueCount()); diff --git a/core/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java b/core/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java new file mode 100644 index 00000000000..cff2d13ce63 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.fieldstats; + +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.indices.IndicesRequestCache; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; + +public class FieldStatsProviderRefreshTests extends ESSingleNodeTestCase { + + public void testQueryRewriteOnRefresh() throws Exception { + assertAcked(client().admin().indices().prepareCreate("index").addMapping("type", "s", "type=text") + .setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true, + IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, + IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .get()); + + // Index some documents + indexDocument("1", "d"); + indexDocument("2", "e"); + indexDocument("3", "f"); + refreshIndex(); + + // Check that the request cache stats are clean + assertRequestCacheStats(0, 0); + + // Search for a range and check that it missed the cache (since it's the + // first time it has run) + final SearchResponse r1 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("a").lte("g")).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(3L)); + assertRequestCacheStats(0, 1); + + // Search again and check it hits the cache + final SearchResponse r2 =
client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("a").lte("g")).get(); + assertSearchResponse(r2); + assertThat(r2.getHits().getTotalHits(), equalTo(3L)); + assertRequestCacheStats(1, 1); + + // Index some more documents in the query range and refresh + indexDocument("4", "c"); + indexDocument("5", "g"); + refreshIndex(); + + // Search again and check the request cache for another miss since request cache should be invalidated by refresh + final SearchResponse r3 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("a").lte("g")).get(); + assertSearchResponse(r3); + assertThat(r3.getHits().getTotalHits(), equalTo(5L)); + assertRequestCacheStats(1, 2); + } + + private void assertRequestCacheStats(long expectedHits, long expectedMisses) { + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(expectedHits)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(expectedMisses)); + } + + private void refreshIndex() { + RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("index").get(); + assertThat(refreshResponse.getSuccessfulShards(), equalTo(refreshResponse.getSuccessfulShards())); + } + + private void indexDocument(String id, String sValue) { + IndexResponse response = client().prepareIndex("index", "type", id).setSource("s", sValue).get(); + assertThat(response.status(), anyOf(equalTo(RestStatus.OK), equalTo(RestStatus.CREATED))); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderTests.java b/core/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderTests.java new file mode 100644 index 00000000000..9cad8d3fc8d --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderTests.java @@ -0,0 +1,446 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.fieldstats; + +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.store.BaseDirectoryWrapper; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.Engine.Searcher; +import org.elasticsearch.index.fieldstats.FieldStatsProvider.Relation; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.MapperService.MergeReason; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.chrono.ISOChronology; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; + +import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import static org.hamcrest.Matchers.equalTo; + +public class FieldStatsProviderTests extends ESTestCase { + + private DirectoryReader directoryReader; + private Searcher searcher; + private FieldStatsProvider fieldStatsProvider; + private BaseDirectoryWrapper dir; + private AnalysisRegistry analysisRegistry; + + @Before + public void setup() throws IOException { + Settings nodeSettings = settingsBuilder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); + IndexSettings settings = IndexSettingsModule.newIndexSettings("_na", nodeSettings); + SimilarityService similarityService = new SimilarityService(settings, Collections.emptyMap()); + analysisRegistry = new AnalysisRegistry(null, new Environment(nodeSettings)); + AnalysisService analysisService = analysisRegistry.build(settings); + IndicesModule indicesModule = new IndicesModule(); + MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); + MapperService service = new MapperService(settings, analysisService, similarityService, mapperRegistry, () -> null); + putMapping(service); + dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()); + indexDocument(service, w, "1", 50L, 50.2f, 50.2, "cherry", new DateTime(2014, 1, 1, 0, 0, 0, ISOChronology.getInstanceUTC()), + "10.10.0.10"); + indexDocument(service, w, "2", 60L, 60.1f, 60.1, "damson", new DateTime(2014, 2, 1, 0, 0, 0, ISOChronology.getInstanceUTC()), + "10.10.0.20"); + indexDocument(service, w, "3", 70L, 70.6f, 70.6, "grape", new DateTime(2014, 3, 1, 0, 0, 0, ISOChronology.getInstanceUTC()), + "10.10.0.30"); + indexDocument(service, w, "4", 80L, 80.2f, 80.2, "kiwi", new DateTime(2014, 4, 1, 0, 0, 0, ISOChronology.getInstanceUTC()), + 
"10.10.0.40"); + indexDocument(service, w, "5", 90L, 90.4f, 90.4, "lemon", new DateTime(2014, 5, 1, 0, 0, 0, ISOChronology.getInstanceUTC()), + "10.10.0.50"); + indexDocument(service, w, "6", 100L, 100.3f, 100.3, "orange", new DateTime(2014, 6, 1, 0, 0, 0, ISOChronology.getInstanceUTC()), + "10.10.0.60"); + directoryReader = DirectoryReader.open(w, true, true); + w.close(); + ShardId shard = new ShardId("index", "_na_", 0); + directoryReader = ElasticsearchDirectoryReader.wrap(directoryReader, shard); + IndexSearcher s = new IndexSearcher(directoryReader); + searcher = new Engine.Searcher("test", s); + fieldStatsProvider = new FieldStatsProvider(searcher, service); + } + + @After + public void teardown() throws IOException { + searcher.close(); + directoryReader.close(); + dir.close(); + analysisRegistry.close(); + } + + public void testiIsFieldWithinQueryLong() throws IOException { + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 10L, 200L, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 10L, null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", null, 200L, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", null, null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 10L, 100L, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 50L, 200L, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 30L, 80L, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 80L, 200L, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 60L, 80L, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 10L, 100L, true, false, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 50L, 200L, false, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 100L, 200L, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 1L, 50L, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 150L, 200L, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 1L, 8L, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", null, 8L, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 150L, null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 100L, 200L, false, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + 
assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 1L, 50L, true, false, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + } + + public void testiIsFieldWithinQueryFloat() throws IOException { + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 10.8f, 200.5f, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 10.8f, null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", null, 200.5f, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", null, null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 10.8f, 100.3f, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 50.2f, 200.5f, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 30.5f, 80.1f, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 80.1f, 200.5f, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 10.8f, 100.3f, true, false, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 50.2f, 200.5f, false, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 100.3f, 200.5f, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 1.9f, 50.2f, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 60.9f, 80.1f, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 150.4f, 200.5f, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 1.9f, 8.1f, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", null, 8.1f, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 150.4f, null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 100.3f, 200.5f, false, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("float_field", 1.9f, 50.2f, true, false, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + } + + public void testiIsFieldWithinQueryDouble() throws IOException { + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 10.8, 200.5, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 10.8, null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", null, 200.5, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + 
assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", null, null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 10.8, 100.3, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 50.2, 200.5, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 30.5, 80.1, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 80.1, 200.5, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 60.9, 80.1, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 10.8, 100.3, true, false, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 50.2, 200.5, false, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 100.3, 200.5, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 1.9, 50.2, true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 150.4, 200.5, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 1.9, 8.1, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", null, 8.1, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("double_field", 150.4, null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 100.3, 200.5, false, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("long_field", 1.9, 50.2, true, false, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + } + + public void testiIsFieldWithinQueryText() throws IOException { + assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("banana"), new BytesRef("zebra"), true, true, + DateTimeZone.UTC, null), equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("banana"), null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", null, new BytesRef("zebra"), true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", null, null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("banana"), new BytesRef("orange"), true, true, + DateTimeZone.UTC, null), equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("cherry"), new BytesRef("zebra"), true, true, + DateTimeZone.UTC, null), equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("banana"), new BytesRef("grape"), true, 
+        assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("grape"), new BytesRef("zebra"), true, true,
+                DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("lime"), new BytesRef("mango"), true, true,
+                DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("banana"), new BytesRef("orange"), true, false,
+                DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("cherry"), new BytesRef("zebra"), false, true,
+                DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("orange"), new BytesRef("zebra"), true, true,
+                DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("apple"), new BytesRef("cherry"), true, true,
+                DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("peach"), new BytesRef("zebra"), true, true,
+                DateTimeZone.UTC, null), equalTo(Relation.DISJOINT));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("apple"), new BytesRef("banana"), true, true,
+                DateTimeZone.UTC, null), equalTo(Relation.DISJOINT));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", null, new BytesRef("banana"), true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.DISJOINT));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("peach"), null, true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.DISJOINT));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("orange"), new BytesRef("zebra"), false, true,
+                DateTimeZone.UTC, null), equalTo(Relation.DISJOINT));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("text_field", new BytesRef("apple"), new BytesRef("cherry"), true, false,
+                DateTimeZone.UTC, null), equalTo(Relation.DISJOINT));
+    }
+
+    public void testIsFieldWithinQueryKeyword() throws IOException {
+        assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("banana"), new BytesRef("zebra"), true, true,
+                DateTimeZone.UTC, null), equalTo(Relation.WITHIN));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("banana"), null, true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.WITHIN));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", null, new BytesRef("zebra"), true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.WITHIN));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", null, null, true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.WITHIN));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("banana"), new BytesRef("orange"), true, true,
+                DateTimeZone.UTC, null), equalTo(Relation.WITHIN));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("cherry"), new BytesRef("zebra"), true, true,
+                DateTimeZone.UTC, null), equalTo(Relation.WITHIN));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("banana"), new BytesRef("grape"), true, true,
+                DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("grape"), new BytesRef("zebra"), true, true,
+                DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS));
BytesRef("grape"), new BytesRef("zebra"), true, true, + DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("lime"), new BytesRef("mango"), true, true, + DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("banana"), new BytesRef("orange"), true, false, + DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("cherry"), new BytesRef("zebra"), false, true, + DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("orange"), new BytesRef("zebra"), true, true, + DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("apple"), new BytesRef("cherry"), true, true, + DateTimeZone.UTC, null), equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("peach"), new BytesRef("zebra"), true, true, + DateTimeZone.UTC, null), equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("apple"), new BytesRef("banana"), true, true, + DateTimeZone.UTC, null), equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", null, new BytesRef("banana"), true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("peach"), null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("orange"), new BytesRef("zebra"), false, true, + DateTimeZone.UTC, null), equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("keyword_field", new BytesRef("apple"), new BytesRef("cherry"), true, false, + DateTimeZone.UTC, null), equalTo(Relation.DISJOINT)); + } + + public void testiIsFieldWithinQueryDate() throws IOException { + assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2013-01-01", "now", true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2013-01-01", null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", null, "now", true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", null, null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2013-01-01", "2014-06-01", true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2014-01-01", "now", true, true, DateTimeZone.UTC, null), + equalTo(Relation.WITHIN)); + assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2013-01-01", "2014-03-01", true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2014-03-01", "now", true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2014-03-01", "2014-05-01", true, true, DateTimeZone.UTC, null), + 
+        assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2013-01-01", "2014-06-01", true, false, DateTimeZone.UTC, null),
+                equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2014-01-01", "now", false, true, DateTimeZone.UTC, null),
+                equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2014-06-01", "now", true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2013-01-01", "2014-01-01", true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2015-01-01", "now", true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.DISJOINT));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2013-01-01", "2013-09-01", true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.DISJOINT));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", null, "2013-09-01", true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.DISJOINT));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2015-01-01", null, true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.DISJOINT));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2014-06-01", "now", false, true, DateTimeZone.UTC, null),
+                equalTo(Relation.DISJOINT));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("date_field", "2013-01-01", "2014-01-01", true, false, DateTimeZone.UTC, null),
+                equalTo(Relation.DISJOINT));
+    }
+
+    public void testIsFieldWithinQueryIp() throws IOException {
+        assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.10.0.1", "10.20.0.1", true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.WITHIN));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.10.0.1", null, true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.WITHIN));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", null, "10.20.0.1", true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.WITHIN));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", null, null, true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.WITHIN));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.10.0.1", "10.10.0.60", true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.WITHIN));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.10.0.10", "10.20.0.1", true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.WITHIN));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.10.0.1", "10.10.0.40", true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.10.0.40", "10.20.0.1", true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.10.0.30", "10.10.0.40", true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.10.0.1", "10.10.0.60", true, false, DateTimeZone.UTC, null),
+                equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.10.0.10", "10.20.0.1", false, true, DateTimeZone.UTC, null),
+                equalTo(Relation.INTERSECTS));
+        assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.10.0.60", "10.20.0.1", true, true, DateTimeZone.UTC, null),
+                equalTo(Relation.INTERSECTS));
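+        // ranges that only touch the indexed min (10.10.0.10) or max (10.10.0.60) at an inclusive bound still intersect
+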
assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.0.0.1", "10.10.0.10", true, true, DateTimeZone.UTC, null), + equalTo(Relation.INTERSECTS)); + assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.20.0.10", "10.20.0.1", true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.0.0.1", "10.0.0.100", true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", null, "10.0.0.100", true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.20.0.10", null, true, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.10.0.60", "10.20.0.1", false, true, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + assertThat(fieldStatsProvider.isFieldWithinQuery("ip_field", "10.0.0.1", "10.10.0.10", true, false, DateTimeZone.UTC, null), + equalTo(Relation.DISJOINT)); + } + + private void putMapping(MapperService service) throws IOException { + XContentBuilder mappingbuilder = JsonXContent.contentBuilder(); + mappingbuilder.startObject(); + mappingbuilder.startObject("type"); + mappingbuilder.startObject("properties"); + mappingbuilder.startObject("long_field"); + mappingbuilder.field("type", "long"); + mappingbuilder.endObject(); + mappingbuilder.startObject("float_field"); + mappingbuilder.field("type", "float"); + mappingbuilder.endObject(); + mappingbuilder.startObject("double_field"); + mappingbuilder.field("type", "double"); + mappingbuilder.endObject(); + mappingbuilder.startObject("text_field"); + mappingbuilder.field("type", "text"); + mappingbuilder.endObject(); + mappingbuilder.startObject("keyword_field"); + mappingbuilder.field("type", "keyword"); + mappingbuilder.endObject(); + mappingbuilder.startObject("date_field"); + mappingbuilder.field("type", "date"); + mappingbuilder.endObject(); + mappingbuilder.startObject("ip_field"); + mappingbuilder.field("type", "ip"); + mappingbuilder.endObject(); + mappingbuilder.endObject(); + mappingbuilder.endObject(); + mappingbuilder.endObject(); + service.merge("type", new CompressedXContent(mappingbuilder.bytes()), MergeReason.MAPPING_UPDATE, true); + } + + private void indexDocument(MapperService service, IndexWriter writer, String id, long longValue, float floatValue, double doubleValue, + String stringValue, DateTime dateValue, String ipValue) throws IOException { + XContentBuilder docBuilder = JsonXContent.contentBuilder(); + docBuilder.startObject(); + docBuilder.field("long_field", longValue); + docBuilder.field("float_field", floatValue); + docBuilder.field("double_field", doubleValue); + docBuilder.field("text_field", stringValue); + docBuilder.field("keyword_field", stringValue); + docBuilder.field("date_field", dateValue); + docBuilder.field("ip_field", ipValue); + docBuilder.endObject(); + DocumentMapper documentMapper = service.documentMapper("type"); + ParsedDocument doc = documentMapper.parse("index", "type", id, docBuilder.bytes()); + writer.addDocument(doc.rootDoc()); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 3206a5e87ae..e4d1e306af3 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java
@@ -19,12 +19,20 @@
 package org.elasticsearch.index.mapper;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
 import org.elasticsearch.test.ESSingleNodeTestCase;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+
 // TODO: make this a real unit test
 public class DocumentParserTests extends ESSingleNodeTestCase {
@@ -61,4 +69,113 @@ public class DocumentParserTests extends ESSingleNodeTestCase {
         assertNotNull(doc.rootDoc().getField("bar"));
         assertNotNull(doc.rootDoc().getField(UidFieldMapper.NAME));
     }
+
+    DocumentMapper createDummyMapping(MapperService mapperService) throws Exception {
+        String mapping = jsonBuilder().startObject().startObject("type").startObject("properties")
+            .startObject("y").field("type", "object").endObject()
+            .startObject("x").startObject("properties")
+                .startObject("subx").field("type", "object").startObject("properties")
+                    .startObject("subsubx").field("type", "object")
+            .endObject().endObject().endObject().endObject().endObject().endObject().endObject().endObject().string();
+
+        DocumentMapper defaultMapper = mapperService.documentMapperParser().parse("type", new CompressedXContent(mapping));
+        return defaultMapper;
+    }
+
+    // creates an object mapper, which is about 100x harder than it should be...
+    ObjectMapper createObjectMapper(MapperService mapperService, String name) throws Exception {
+        String[] nameParts = name.split("\\.");
+        ContentPath path = new ContentPath();
+        for (int i = 0; i < nameParts.length - 1; ++i) {
+            path.add(nameParts[i]);
+        }
+        ParseContext context = new ParseContext.InternalParseContext(Settings.EMPTY,
+            mapperService.documentMapperParser(), mapperService.documentMapper("type"), path);
+        Mapper.Builder builder = new ObjectMapper.Builder(nameParts[nameParts.length - 1]).enabled(true);
+        Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
+        return (ObjectMapper)builder.build(builderContext);
+    }
+
+    public void testEmptyMappingUpdate() throws Exception {
+        DocumentMapper docMapper = createDummyMapping(createIndex("test").mapperService());
+        assertNull(DocumentParser.createDynamicUpdate(docMapper.mapping(), docMapper, Collections.emptyList()));
+    }
+
+    public void testSingleMappingUpdate() throws Exception {
+        DocumentMapper docMapper = createDummyMapping(createIndex("test").mapperService());
+        List<Mapper> updates = Collections.singletonList(new MockFieldMapper("foo"));
+        Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mapping(), docMapper, updates);
+        assertNotNull(mapping.root().getMapper("foo"));
+    }
+
+    public void testSubfieldMappingUpdate() throws Exception {
+        DocumentMapper docMapper = createDummyMapping(createIndex("test").mapperService());
+        List<Mapper> updates = Collections.singletonList(new MockFieldMapper("x.foo"));
+        Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mapping(), docMapper, updates);
+        Mapper xMapper = mapping.root().getMapper("x");
+        assertNotNull(xMapper);
+        assertTrue(xMapper instanceof ObjectMapper);
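+        // the dynamic update should expose the new "x.foo" leaf without echoing back the pre-existing "subx" mapper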
+        assertNotNull(((ObjectMapper)xMapper).getMapper("foo"));
+        assertNull(((ObjectMapper)xMapper).getMapper("subx"));
+    }
+
+    public void testMultipleSubfieldMappingUpdate() throws Exception {
+        DocumentMapper docMapper = createDummyMapping(createIndex("test").mapperService());
+        List<Mapper> updates = new ArrayList<>();
+        updates.add(new MockFieldMapper("x.foo"));
+        updates.add(new MockFieldMapper("x.bar"));
+        Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mapping(), docMapper, updates);
+        Mapper xMapper = mapping.root().getMapper("x");
+        assertNotNull(xMapper);
+        assertTrue(xMapper instanceof ObjectMapper);
+        assertNotNull(((ObjectMapper)xMapper).getMapper("foo"));
+        assertNotNull(((ObjectMapper)xMapper).getMapper("bar"));
+        assertNull(((ObjectMapper)xMapper).getMapper("subx"));
+    }
+
+    public void testDeepSubfieldMappingUpdate() throws Exception {
+        DocumentMapper docMapper = createDummyMapping(createIndex("test").mapperService());
+        List<Mapper> updates = Collections.singletonList(new MockFieldMapper("x.subx.foo"));
+        Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mapping(), docMapper, updates);
+        Mapper xMapper = mapping.root().getMapper("x");
+        assertNotNull(xMapper);
+        assertTrue(xMapper instanceof ObjectMapper);
+        Mapper subxMapper = ((ObjectMapper)xMapper).getMapper("subx");
+        assertTrue(subxMapper instanceof ObjectMapper);
+        assertNotNull(((ObjectMapper)subxMapper).getMapper("foo"));
+        assertNull(((ObjectMapper)subxMapper).getMapper("subsubx"));
+    }
+
+    public void testDeepSubfieldAfterSubfieldMappingUpdate() throws Exception {
+        DocumentMapper docMapper = createDummyMapping(createIndex("test").mapperService());
+        List<Mapper> updates = new ArrayList<>();
+        updates.add(new MockFieldMapper("x.a"));
+        updates.add(new MockFieldMapper("x.subx.b"));
+        Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mapping(), docMapper, updates);
+        Mapper xMapper = mapping.root().getMapper("x");
+        assertNotNull(xMapper);
+        assertTrue(xMapper instanceof ObjectMapper);
+        assertNotNull(((ObjectMapper)xMapper).getMapper("a"));
+        Mapper subxMapper = ((ObjectMapper)xMapper).getMapper("subx");
+        assertTrue(subxMapper instanceof ObjectMapper);
+        assertNotNull(((ObjectMapper)subxMapper).getMapper("b"));
+    }
+
+    public void testObjectMappingUpdate() throws Exception {
+        MapperService mapperService = createIndex("test").mapperService();
+        DocumentMapper docMapper = createDummyMapping(mapperService);
+        List<Mapper> updates = new ArrayList<>();
+        updates.add(createObjectMapper(mapperService, "foo"));
+        updates.add(createObjectMapper(mapperService, "foo.bar"));
+        updates.add(new MockFieldMapper("foo.bar.baz"));
+        updates.add(new MockFieldMapper("foo.field"));
+        Mapping mapping = DocumentParser.createDynamicUpdate(docMapper.mapping(), docMapper, updates);
+        Mapper fooMapper = mapping.root().getMapper("foo");
+        assertNotNull(fooMapper);
+        assertTrue(fooMapper instanceof ObjectMapper);
+        assertNotNull(((ObjectMapper)fooMapper).getMapper("field"));
+        Mapper barMapper = ((ObjectMapper)fooMapper).getMapper("bar");
+        assertTrue(barMapper instanceof ObjectMapper);
+        assertNotNull(((ObjectMapper)barMapper).getMapper("baz"));
+    }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java
index 748dd0a0a1a..51e88e50edd 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java
+++
b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java @@ -28,27 +28,30 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.local.LocalTransport; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.test.cluster.TestClusterService; +import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; -import static org.hamcrest.CoreMatchers.instanceOf; import java.util.Collections; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.hamcrest.CoreMatchers.instanceOf; + public class DynamicMappingDisabledTests extends ESSingleNodeTestCase { private static ThreadPool THREAD_POOL; - private TestClusterService clusterService; + private ClusterService clusterService; private LocalTransport transport; private TransportService transportService; private IndicesService indicesService; @@ -67,9 +70,9 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase { public void setUp() throws Exception { super.setUp(); settings = Settings.builder() - .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false) - .build(); - clusterService = new TestClusterService(THREAD_POOL); + .put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false) + .build(); + clusterService = createClusterService(THREAD_POOL); transport = new LocalTransport(settings, THREAD_POOL, Version.CURRENT, new NamedWriteableRegistry()); transportService = new TransportService(transport, THREAD_POOL); indicesService = getInstanceFromNode(IndicesService.class); @@ -79,6 +82,14 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase { autoCreateIndex = new AutoCreateIndex(settings, indexNameExpressionResolver); } + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + transportService.close(); + } + + @AfterClass public static void destroyThreadPool() { ThreadPool.terminate(THREAD_POOL, 30, TimeUnit.SECONDS); @@ -88,8 +99,8 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase { public void testDynamicDisabled() { TransportIndexAction action = new TransportIndexAction(settings, transportService, clusterService, - indicesService, THREAD_POOL, shardStateAction, null, null, actionFilters, indexNameExpressionResolver, - autoCreateIndex); + indicesService, THREAD_POOL, shardStateAction, null, null, actionFilters, indexNameExpressionResolver, + autoCreateIndex); IndexRequest request = new IndexRequest("index", "type", "1"); request.source("foo", 3); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index 
97c030e9eec..d2a8080be86 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.index.mapper.core.TextFieldMapper; import org.elasticsearch.test.ESSingleNodeTestCase; import java.io.IOException; +import java.util.List; import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -211,7 +212,9 @@ public class DynamicMappingTests extends ESSingleNodeTestCase { ctx.reset(XContentHelper.createParser(source.source()), new ParseContext.Document(), source); assertEquals(XContentParser.Token.START_OBJECT, ctx.parser().nextToken()); ctx.parser().nextToken(); - return DocumentParser.parseObject(ctx, mapper.root(), true); + DocumentParser.parseObjectOrNested(ctx, mapper.root(), true); + Mapping mapping = DocumentParser.createDynamicUpdate(mapper.mapping(), mapper, ctx.getDynamicMappers()); + return mapping == null ? null : mapping.root(); } public void testDynamicMappingsNotNeeded() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java index c5dbd653bfe..cb9a64d357c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java @@ -19,12 +19,8 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -59,7 +55,7 @@ public class FieldTypeLookupTests extends ESTestCase { public void testAddNewField() { FieldTypeLookup lookup = new FieldTypeLookup(); - FakeFieldMapper f = new FakeFieldMapper("foo"); + MockFieldMapper f = new MockFieldMapper("foo"); FieldTypeLookup lookup2 = lookup.copyAndAddAll("type", newList(f), randomBoolean()); assertNull(lookup.get("foo")); assertNull(lookup.get("bar")); @@ -73,8 +69,8 @@ public class FieldTypeLookupTests extends ESTestCase { } public void testAddExistingField() { - FakeFieldMapper f = new FakeFieldMapper("foo"); - FakeFieldMapper f2 = new FakeFieldMapper("foo"); + MockFieldMapper f = new MockFieldMapper("foo"); + MockFieldMapper f2 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); lookup = lookup.copyAndAddAll("type1", newList(f), randomBoolean()); FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2), randomBoolean()); @@ -84,8 +80,8 @@ public class FieldTypeLookupTests extends ESTestCase { } public void testAddExistingIndexName() { - FakeFieldMapper f = new FakeFieldMapper("foo"); - FakeFieldMapper f2 = new FakeFieldMapper("bar"); + MockFieldMapper f = new MockFieldMapper("foo"); + MockFieldMapper f2 = new MockFieldMapper("bar"); FieldTypeLookup lookup = new FieldTypeLookup(); lookup = lookup.copyAndAddAll("type1", newList(f), randomBoolean()); FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2), randomBoolean()); @@ -96,8 +92,8 @@ public class FieldTypeLookupTests extends ESTestCase { } public void testAddExistingFullName() { - FakeFieldMapper f = new FakeFieldMapper("foo"); - FakeFieldMapper f2 = new FakeFieldMapper("foo"); + MockFieldMapper f = new 
MockFieldMapper("foo"); + MockFieldMapper f2 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); try { lookup.copyAndAddAll("type2", newList(f2), randomBoolean()); @@ -107,12 +103,13 @@ public class FieldTypeLookupTests extends ESTestCase { } public void testCheckCompatibilityMismatchedTypes() { - FieldMapper f1 = new FakeFieldMapper("foo"); + FieldMapper f1 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); lookup = lookup.copyAndAddAll("type", newList(f1), randomBoolean()); - MappedFieldType ft2 = FakeFieldMapper.makeOtherFieldType("foo"); - FieldMapper f2 = new FakeFieldMapper("foo", ft2); + OtherFakeFieldType ft2 = new OtherFakeFieldType(); + ft2.setName("foo"); + FieldMapper f2 = new MockFieldMapper("foo", ft2); try { lookup.copyAndAddAll("type2", newList(f2), false); fail("expected type mismatch"); @@ -129,13 +126,14 @@ public class FieldTypeLookupTests extends ESTestCase { } public void testCheckCompatibilityConflict() { - FieldMapper f1 = new FakeFieldMapper("foo"); + FieldMapper f1 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); lookup = lookup.copyAndAddAll("type", newList(f1), randomBoolean()); - MappedFieldType ft2 = FakeFieldMapper.makeFieldType("foo"); + MappedFieldType ft2 = new MockFieldMapper.FakeFieldType(); + ft2.setName("foo"); ft2.setBoost(2.0f); - FieldMapper f2 = new FakeFieldMapper("foo", ft2); + FieldMapper f2 = new MockFieldMapper("foo", ft2); try { // different type lookup.copyAndAddAll("type2", newList(f2), false); @@ -146,9 +144,10 @@ public class FieldTypeLookupTests extends ESTestCase { lookup.copyAndAddAll("type", newList(f2), false); // boost is updateable, so ok since we are implicitly updating all types lookup.copyAndAddAll("type2", newList(f2), true); // boost is updateable, so ok if forcing // now with a non changeable setting - MappedFieldType ft3 = FakeFieldMapper.makeFieldType("foo"); + MappedFieldType ft3 = new MockFieldMapper.FakeFieldType(); + ft3.setName("foo"); ft3.setStored(true); - FieldMapper f3 = new FakeFieldMapper("foo", ft3); + FieldMapper f3 = new MockFieldMapper("foo", ft3); try { lookup.copyAndAddAll("type2", newList(f3), false); fail("expected conflict"); @@ -165,8 +164,8 @@ public class FieldTypeLookupTests extends ESTestCase { } public void testSimpleMatchFullNames() { - FakeFieldMapper f1 = new FakeFieldMapper("foo"); - FakeFieldMapper f2 = new FakeFieldMapper("bar"); + MockFieldMapper f1 = new MockFieldMapper("foo"); + MockFieldMapper f2 = new MockFieldMapper("bar"); FieldTypeLookup lookup = new FieldTypeLookup(); lookup = lookup.copyAndAddAll("type", newList(f1, f2), randomBoolean()); Collection names = lookup.simpleMatchToFullName("b*"); @@ -175,7 +174,7 @@ public class FieldTypeLookupTests extends ESTestCase { } public void testIteratorImmutable() { - FakeFieldMapper f1 = new FakeFieldMapper("foo"); + MockFieldMapper f1 = new MockFieldMapper("foo"); FieldTypeLookup lookup = new FieldTypeLookup(); lookup = lookup.copyAndAddAll("type", newList(f1), randomBoolean()); @@ -194,59 +193,6 @@ public class FieldTypeLookupTests extends ESTestCase { return Arrays.asList(mapper); } - // this sucks how much must be overridden just do get a dummy field mapper... 
- static class FakeFieldMapper extends FieldMapper { - static Settings dummySettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id).build(); - public FakeFieldMapper(String fullName) { - super(fullName, makeFieldType(fullName), makeFieldType(fullName), dummySettings, null, null); - } - public FakeFieldMapper(String fullName, MappedFieldType fieldType) { - super(fullName, fieldType, fieldType, dummySettings, null, null); - } - static MappedFieldType makeFieldType(String fullName) { - FakeFieldType fieldType = new FakeFieldType(); - fieldType.setName(fullName); - return fieldType; - } - static MappedFieldType makeOtherFieldType(String fullName) { - OtherFakeFieldType fieldType = new OtherFakeFieldType(); - fieldType.setName(fullName); - return fieldType; - } - static class FakeFieldType extends MappedFieldType { - public FakeFieldType() {} - protected FakeFieldType(FakeFieldType ref) { - super(ref); - } - @Override - public MappedFieldType clone() { - return new FakeFieldType(this); - } - @Override - public String typeName() { - return "faketype"; - } - } - static class OtherFakeFieldType extends MappedFieldType { - public OtherFakeFieldType() {} - protected OtherFakeFieldType(OtherFakeFieldType ref) { - super(ref); - } - @Override - public MappedFieldType clone() { - return new OtherFakeFieldType(this); - } - @Override - public String typeName() { - return "otherfaketype"; - } - } - @Override - protected String contentType() { return null; } - @Override - protected void parseCreateField(ParseContext context, List list) throws IOException {} - } - private int size(Iterator iterator) { if (iterator == null) { throw new NullPointerException("iterator"); @@ -258,4 +204,23 @@ public class FieldTypeLookupTests extends ESTestCase { } return count; } + + static class OtherFakeFieldType extends MappedFieldType { + public OtherFakeFieldType() { + } + + protected OtherFakeFieldType(OtherFakeFieldType ref) { + super(ref); + } + + @Override + public MappedFieldType clone() { + return new OtherFakeFieldType(this); + } + + @Override + public String typeName() { + return "otherfaketype"; + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index 966edf82621..b7194a3829b 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -130,12 +130,6 @@ public abstract class FieldTypeTestCase extends ESTestCase { other.setSimilarity(new BM25SimilarityProvider("bar", Settings.EMPTY)); } }, - new Modifier("norms.loading", true) { - @Override - public void modify(MappedFieldType ft) { - ft.setNormsLoading(MappedFieldType.Loading.LAZY); - } - }, new Modifier("fielddata", true) { @Override public void modify(MappedFieldType ft) { @@ -217,7 +211,6 @@ public abstract class FieldTypeTestCase extends ESTestCase { ", searchAnalyzer=" + ft.searchAnalyzer() + ", searchQuoteAnalyzer=" + ft.searchQuoteAnalyzer() + ", similarity=" + ft.similarity() + - ", normsLoading=" + ft.normsLoading() + ", fieldDataType=" + ft.fieldDataType() + ", nullValue=" + ft.nullValue() + ", nullValueAsString='" + ft.nullValueAsString() + "'" + diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java index 53d5e1744eb..501c538b870 100644 --- 
a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java @@ -223,7 +223,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase { } public void testRandom() throws Exception { - boolean omitNorms = false; + boolean norms = true; boolean stored = false; boolean enabled = true; boolean tv_stored = false; @@ -239,7 +239,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase { allDefault = false; mappingBuilder.startObject("_all"); if (randomBoolean()) { - booleanOptionList.add(new Tuple<>("omit_norms", omitNorms = randomBoolean())); + booleanOptionList.add(new Tuple<>("norms", norms = randomBoolean())); } if (randomBoolean()) { booleanOptionList.add(new Tuple<>("store", stored = randomBoolean())); @@ -272,7 +272,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); String mapping = mappingBuilder.endObject().endObject().bytes().toUtf8(); - logger.info(mapping); + logger.info("Mapping: {}", mapping); DocumentMapper docMapper = parser.parse("test", new CompressedXContent(mapping)); String builtMapping = docMapper.mappingSource().string(); // reparse it @@ -285,7 +285,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase { Document doc = builtDocMapper.parse("test", "test", "1", new BytesArray(json)).rootDoc(); AllField field = (AllField) doc.getField("_all"); if (enabled) { - assertThat(field.fieldType().omitNorms(), equalTo(omitNorms)); + assertThat(field.fieldType().omitNorms(), equalTo(!norms)); assertThat(field.fieldType().stored(), equalTo(stored)); assertThat(field.fieldType().storeTermVectorOffsets(), equalTo(tv_offsets)); assertThat(field.fieldType().storeTermVectorPayloads(), equalTo(tv_payloads)); @@ -433,19 +433,6 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase { assertThat(e.getDetailedMessage(), containsString("[_all] is always tokenized and cannot have doc values")); } - - mapping = jsonBuilder().startObject().startObject("type") - .startObject("_all") - .startObject("fielddata") - .field("format", "doc_values") - .endObject().endObject().endObject().endObject().string(); - Settings legacySettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); - try { - createIndex("test_old", legacySettings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - fail(); - } catch (MapperParsingException e) { - assertThat(e.getDetailedMessage(), containsString("[_all] is always tokenized and cannot have doc values")); - } } public void testAutoBoost() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java index 91a0ca15cd4..490477d67e7 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java @@ -85,18 +85,17 @@ public class CustomBoostMappingTests extends ESSingleNodeTestCase { } public void testBackCompatFieldMappingBoostValues() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") - .startObject("s_field").field("type", "keyword").field("boost", 2.0f).endObject() - 
.startObject("l_field").field("type", "long").field("boost", 3.0f).startObject("norms").field("enabled", true).endObject().endObject() - .startObject("i_field").field("type", "integer").field("boost", 4.0f).startObject("norms").field("enabled", true).endObject().endObject() - .startObject("sh_field").field("type", "short").field("boost", 5.0f).startObject("norms").field("enabled", true).endObject().endObject() - .startObject("b_field").field("type", "byte").field("boost", 6.0f).startObject("norms").field("enabled", true).endObject().endObject() - .startObject("d_field").field("type", "double").field("boost", 7.0f).startObject("norms").field("enabled", true).endObject().endObject() - .startObject("f_field").field("type", "float").field("boost", 8.0f).startObject("norms").field("enabled", true).endObject().endObject() - .startObject("date_field").field("type", "date").field("boost", 9.0f).startObject("norms").field("enabled", true).endObject().endObject() - .endObject().endObject().endObject().string(); - { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + .startObject("s_field").field("type", "keyword").field("boost", 2.0f).endObject() + .startObject("l_field").field("type", "long").field("boost", 3.0f).endObject() + .startObject("i_field").field("type", "integer").field("boost", 4.0f).endObject() + .startObject("sh_field").field("type", "short").field("boost", 5.0f).endObject() + .startObject("b_field").field("type", "byte").field("boost", 6.0f).endObject() + .startObject("d_field").field("type", "double").field("boost", 7.0f).endObject() + .startObject("f_field").field("type", "float").field("boost", 8.0f).endObject() + .startObject("date_field").field("type", "date").field("boost", 9.0f).endObject() + .endObject().endObject().endObject().string(); IndexService indexService = createIndex("test", BW_SETTINGS); QueryShardContext context = indexService.newQueryShardContext(); DocumentMapper mapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -122,16 +121,34 @@ public class CustomBoostMappingTests extends ESSingleNodeTestCase { .endObject().bytes()); assertThat(doc.rootDoc().getField("s_field").boost(), equalTo(2.0f)); + assertThat(doc.rootDoc().getField("s_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("l_field").boost(), equalTo(3.0f)); + assertThat(doc.rootDoc().getField("l_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("i_field").boost(), equalTo(4.0f)); + assertThat(doc.rootDoc().getField("i_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("sh_field").boost(), equalTo(5.0f)); + assertThat(doc.rootDoc().getField("sh_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("b_field").boost(), equalTo(6.0f)); + assertThat(doc.rootDoc().getField("b_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("d_field").boost(), equalTo(7.0f)); + assertThat(doc.rootDoc().getField("d_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("f_field").boost(), equalTo(8.0f)); + assertThat(doc.rootDoc().getField("f_field").fieldType().omitNorms(), equalTo(false)); assertThat(doc.rootDoc().getField("date_field").boost(), equalTo(9.0f)); + assertThat(doc.rootDoc().getField("date_field").fieldType().omitNorms(), equalTo(false)); } { + String mapping = 
XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") + .startObject("s_field").field("type", "keyword").field("boost", 2.0f).endObject() + .startObject("l_field").field("type", "long").field("boost", 3.0f).endObject() + .startObject("i_field").field("type", "integer").field("boost", 4.0f).endObject() + .startObject("sh_field").field("type", "short").field("boost", 5.0f).endObject() + .startObject("b_field").field("type", "byte").field("boost", 6.0f).endObject() + .startObject("d_field").field("type", "double").field("boost", 7.0f).endObject() + .startObject("f_field").field("type", "float").field("boost", 8.0f).endObject() + .startObject("date_field").field("type", "date").field("boost", 9.0f).endObject() + .endObject().endObject().endObject().string(); IndexService indexService = createIndex("text"); QueryShardContext context = indexService.newQueryShardContext(); DocumentMapper mapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -157,13 +174,21 @@ public class CustomBoostMappingTests extends ESSingleNodeTestCase { .endObject().bytes()); assertThat(doc.rootDoc().getField("s_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("s_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("l_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("l_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("i_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("i_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("sh_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("sh_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("b_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("b_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("d_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("d_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("f_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("f_field").fieldType().omitNorms(), equalTo(true)); assertThat(doc.rootDoc().getField("date_field").boost(), equalTo(1f)); + assertThat(doc.rootDoc().getField("date_field").fieldType().omitNorms(), equalTo(true)); } } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java b/core/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java index b1fde6bdd67..90121e66ea8 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java @@ -102,13 +102,13 @@ public class FieldLevelBoostTests extends ESSingleNodeTestCase { public void testBackCompatFieldLevelMappingBoost() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties") .startObject("str_field").field("type", "keyword").field("boost", "2.0").endObject() - .startObject("int_field").field("type", "integer").field("boost", "3.0").startObject("norms").field("enabled", true).endObject().endObject() - .startObject("byte_field").field("type", "byte").field("boost", "4.0").startObject("norms").field("enabled", true).endObject().endObject() - .startObject("date_field").field("type", "date").field("boost", "5.0").startObject("norms").field("enabled", 
true).endObject().endObject() - .startObject("double_field").field("type", "double").field("boost", "6.0").startObject("norms").field("enabled", true).endObject().endObject() - .startObject("float_field").field("type", "float").field("boost", "7.0").startObject("norms").field("enabled", true).endObject().endObject() - .startObject("long_field").field("type", "long").field("boost", "8.0").startObject("norms").field("enabled", true).endObject().endObject() - .startObject("short_field").field("type", "short").field("boost", "9.0").startObject("norms").field("enabled", true).endObject().endObject() + .startObject("int_field").field("type", "integer").field("boost", "3.0").endObject() + .startObject("byte_field").field("type", "byte").field("boost", "4.0").endObject() + .startObject("date_field").field("type", "date").field("boost", "5.0").endObject() + .startObject("double_field").field("type", "double").field("boost", "6.0").endObject() + .startObject("float_field").field("type", "float").field("boost", "7.0").endObject() + .startObject("long_field").field("type", "long").field("boost", "8.0").endObject() + .startObject("short_field").field("type", "short").field("boost", "9.0").endObject() .string(); { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldMapperTests.java index 7bed3ce091f..74fc98fddbe 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldMapperTests.java @@ -85,7 +85,7 @@ public class BooleanFieldMapperTests extends ESSingleNodeTestCase { try (Directory dir = new RAMDirectory(); IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(getRandom())))) { w.addDocuments(doc.docs()); - try (DirectoryReader reader = DirectoryReader.open(w, true)) { + try (DirectoryReader reader = DirectoryReader.open(w)) { final LeafReader leaf = reader.leaves().get(0).reader(); // boolean fields are indexed and have doc values by default assertEquals(new BytesRef("T"), leaf.terms("field").iterator().next()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldMapperTests.java index bdb3f9762ef..28867ed1f73 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/KeywordFieldMapperTests.java @@ -24,21 +24,33 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.junit.Before; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import static 
org.hamcrest.Matchers.equalTo; public class KeywordFieldMapperTests extends ESSingleNodeTestCase { + @Override + protected Collection> getPlugins() { + return pluginList(InternalSettingsPlugin.class); + } + IndexService indexService; DocumentMapperParser parser; @@ -200,4 +212,82 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase { assertEquals(1, fields.length); assertEquals(DocValuesType.NONE, fields[0].fieldType().docValuesType()); } + + public void testIndexOptions() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword") + .field("index_options", "freqs").endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("field", "1234") + .endObject() + .bytes()); + + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(2, fields.length); + assertEquals(IndexOptions.DOCS_AND_FREQS, fields[0].fieldType().indexOptions()); + + for (String indexOptions : Arrays.asList("positions", "offsets")) { + final String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword") + .field("index_options", indexOptions).endObject().endObject() + .endObject().endObject().string(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> parser.parse("type", new CompressedXContent(mapping2))); + assertEquals("The [keyword] field does not support positions, got [index_options]=" + indexOptions, e.getMessage()); + } + } + + public void testBoost() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword").field("boost", 2f).endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + } + + public void testBoostImplicitlyEnablesNormsOnOldIndex() throws IOException { + indexService = createIndex("test2", + Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_0).build()); + parser = indexService.mapperService().documentMapperParser(); + + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword").field("boost", 2f).endObject().endObject() + .endObject().endObject().string(); + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + String expectedMapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword") + .field("boost", 2f).field("norms", true).endObject().endObject() + .endObject().endObject().string(); + assertEquals(expectedMapping, mapper.mappingSource().toString()); + } + + public void testEnableNorms() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "keyword").field("norms", true).endObject().endObject() + .endObject().endObject().string(); + + DocumentMapper 
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/StringMappingUpgradeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/StringMappingUpgradeTests.java new file mode 100644 index 00000000000..d49f50da0ab --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/StringMappingUpgradeTests.java @@ -0,0 +1,187 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.core; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; + +import org.apache.lucene.index.IndexOptions; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.DocumentMapperParser; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; + +public class StringMappingUpgradeTests extends ESSingleNodeTestCase { + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return pluginList(InternalSettingsPlugin.class); + } + + public void testUpgradeDefaults() throws IOException { + IndexService indexService = createIndex("test"); + DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "string").endObject().endObject() + .endObject().endObject().string(); + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + FieldMapper field = mapper.mappers().getMapper("field"); + assertThat(field, instanceOf(TextFieldMapper.class)); + } + + public void testUpgradeAnalyzedString() throws IOException { +
IndexService indexService = createIndex("test"); + DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "string").field("index", "analyzed").endObject().endObject() + .endObject().endObject().string(); + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + FieldMapper field = mapper.mappers().getMapper("field"); + assertThat(field, instanceOf(TextFieldMapper.class)); + } + + public void testUpgradeNotAnalyzedString() throws IOException { + IndexService indexService = createIndex("test"); + DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "string") + .field("index", "not_analyzed").endObject().endObject() + .endObject().endObject().string(); + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + FieldMapper field = mapper.mappers().getMapper("field"); + assertThat(field, instanceOf(KeywordFieldMapper.class)); + } + + public void testUpgradeNotIndexedString() throws IOException { + IndexService indexService = createIndex("test"); + DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "string").field("index", "no").endObject().endObject() + .endObject().endObject().string(); + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + FieldMapper field = mapper.mappers().getMapper("field"); + assertThat(field, instanceOf(KeywordFieldMapper.class)); + assertEquals(IndexOptions.NONE, field.fieldType().indexOptions()); + } + + public void testNotSupportedUpgrade() throws IOException { + IndexService indexService = createIndex("test"); + DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "string").field("analyzer", "keyword").endObject().endObject() + .endObject().endObject().string(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> parser.parse("type", new CompressedXContent(mapping))); + assertThat(e.getMessage(), containsString("The [string] type is removed in 5.0")); + } + + public void testUpgradeRandomMapping() throws IOException { + final int iters = 20; + for (int i = 0; i < iters; ++i) { + doTestUpgradeRandomMapping(i); + } + } + + private void doTestUpgradeRandomMapping(int iter) throws IOException { + IndexService indexService; + boolean oldIndex = randomBoolean(); + String indexName = "test" + iter; + if (oldIndex) { + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_0) + .build(); + indexService = createIndex(indexName, settings); + } else { + indexService = createIndex(indexName); + } + DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "string"); + boolean keyword = randomBoolean(); + boolean hasNorms = keyword == false; + boolean 
shouldUpgrade = true; + if (keyword) { + mapping.field("index", randomBoolean() ? "not_analyzed" : "no"); + } else if (randomBoolean()) { + mapping.field("index", "analyzed"); + } + if (randomBoolean()) { + mapping.field("store", RandomPicks.randomFrom(random(), Arrays.asList("yes", "no", true, false))); + } + if (keyword && randomBoolean()) { + mapping.field("doc_values", randomBoolean()); + } + if (randomBoolean()) { + hasNorms = randomBoolean(); + if (randomBoolean()) { + mapping.field("omit_norms", hasNorms == false); + } else { + mapping.field("norms", Collections.singletonMap("enabled", hasNorms)); + } + } + if (randomBoolean()) { + mapping.startObject("fields").startObject("raw").field("type", "keyword").endObject().endObject(); + } + if (randomBoolean()) { + mapping.field("copy_to", "bar"); + } + if (randomBoolean()) { + // this option is not upgraded automatically + mapping.field("index_options", "docs"); + shouldUpgrade = false; + } + mapping.endObject().endObject().endObject().endObject(); + + if (oldIndex == false && shouldUpgrade == false) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> parser.parse("type", new CompressedXContent(mapping.string()))); + assertThat(e.getMessage(), containsString("The [string] type is removed in 5.0")); + } else { + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping.string())); + FieldMapper field = mapper.mappers().getMapper("field"); + if (oldIndex) { + assertThat(field, instanceOf(StringFieldMapper.class)); + } else if (keyword) { + assertThat(field, instanceOf(KeywordFieldMapper.class)); + } else { + assertThat(field, instanceOf(TextFieldMapper.class)); + } + if (field.fieldType().indexOptions() != IndexOptions.NONE) { + assertEquals(hasNorms, field.fieldType().omitNorms() == false); + } + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java index 3a9d5b46ab9..8dba6dd3fe3 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java @@ -132,9 +132,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field") .field("type", "text") - .startObject("norms") - .field("enabled", false) - .endObject() + .field("norms", false) .endObject().endObject() .endObject().endObject().string(); @@ -386,4 +384,5 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorPositions(), equalTo(true)); assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorPayloads(), equalTo(true)); } + } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java index 3056b63b4c0..4f4bbc65699 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java @@ -19,11 +19,11 @@ package org.elasticsearch.index.mapper.date; -import org.apache.lucene.analysis.NumericTokenStream.NumericTermAttribute; +import org.apache.lucene.analysis.LegacyNumericTokenStream.LegacyNumericTermAttribute; import 
org.apache.lucene.analysis.TokenStream; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.search.NumericRangeQuery; +import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.util.Constants; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; @@ -189,7 +189,7 @@ public class SimpleDateMappingTests extends ESSingleNodeTestCase { TokenStream tokenStream = doc.rootDoc().getField(fieldA).tokenStream(defaultMapper.mappers().indexAnalyzer(), null); tokenStream.reset(); - NumericTermAttribute nta = tokenStream.addAttribute(NumericTermAttribute.class); + LegacyNumericTermAttribute nta = tokenStream.addAttribute(LegacyNumericTermAttribute.class); List<Long> values = new ArrayList<>(); while(tokenStream.incrementToken()) { values.add(nta.getRawValue()); @@ -197,7 +197,7 @@ public class SimpleDateMappingTests extends ESSingleNodeTestCase { tokenStream = doc.rootDoc().getField(fieldB).tokenStream(defaultMapper.mappers().indexAnalyzer(), null); tokenStream.reset(); - nta = tokenStream.addAttribute(NumericTermAttribute.class); + nta = tokenStream.addAttribute(LegacyNumericTermAttribute.class); int pos = 0; while(tokenStream.incrementToken()) { assertThat(values.get(pos++), equalTo(nta.getRawValue())); @@ -256,10 +256,10 @@ public class SimpleDateMappingTests extends ESSingleNodeTestCase { .bytes()); assertThat(((LongFieldMapper.CustomLongNumericField) doc.rootDoc().getField("date_field")).numericAsString(), equalTo(Long.toString(new DateTime(TimeValue.timeValueHours(10).millis(), DateTimeZone.UTC).getMillis()))); - NumericRangeQuery<Long> rangeQuery; + LegacyNumericRangeQuery<Long> rangeQuery; try { SearchContext.setCurrent(new TestSearchContext(null)); - rangeQuery = (NumericRangeQuery<Long>) defaultMapper.mappers().smartNameFieldMapper("date_field").fieldType().rangeQuery("10:00:00", "11:00:00", true, true).rewrite(null); + rangeQuery = (LegacyNumericRangeQuery<Long>) defaultMapper.mappers().smartNameFieldMapper("date_field").fieldType().rangeQuery("10:00:00", "11:00:00", true, true).rewrite(null); } finally { SearchContext.removeCurrent(); } @@ -282,10 +282,10 @@ public class SimpleDateMappingTests extends ESSingleNodeTestCase { .bytes()); assertThat(((LongFieldMapper.CustomLongNumericField) doc.rootDoc().getField("date_field")).numericAsString(), equalTo(Long.toString(new DateTime(TimeValue.timeValueHours(34).millis(), DateTimeZone.UTC).getMillis()))); - NumericRangeQuery<Long> rangeQuery; + LegacyNumericRangeQuery<Long> rangeQuery; try { SearchContext.setCurrent(new TestSearchContext(null)); - rangeQuery = (NumericRangeQuery<Long>) defaultMapper.mappers().smartNameFieldMapper("date_field").fieldType().rangeQuery("Jan 02 10:00:00", "Jan 02 11:00:00", true, true).rewrite(null); + rangeQuery = (LegacyNumericRangeQuery<Long>) defaultMapper.mappers().smartNameFieldMapper("date_field").fieldType().rangeQuery("Jan 02 10:00:00", "Jan 02 11:00:00", true, true).rewrite(null); } finally { SearchContext.removeCurrent(); }
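The NumericTokenStream, NumericTermAttribute and NumericRangeQuery renames in the hunks above are mechanical fallout of the Lucene 6 upgrade: the trie-encoded numeric classes were deprecated and given a Legacy prefix while their behavior stayed the same. A minimal sketch of the renamed API, assuming only that lucene-core at the snapshot this patch tracks is on the classpath (class and method names are the ones imported above; the wrapper class is hypothetical):

import org.apache.lucene.analysis.LegacyNumericTokenStream;
import org.apache.lucene.analysis.LegacyNumericTokenStream.LegacyNumericTermAttribute;

public class LegacyNumericRenameSketch {
    public static void main(String[] args) throws Exception {
        // Emits the same trie-encoded terms as the pre-6.0 NumericTokenStream;
        // only the class names carry the new Legacy prefix.
        try (LegacyNumericTokenStream ts = new LegacyNumericTokenStream(16)) {
            ts.setLongValue(42L);
            LegacyNumericTermAttribute nta = ts.addAttribute(LegacyNumericTermAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                // One token per precision step: the shift grows by 16 per token
                // until the full 64-bit value width is covered.
                System.out.println("shift=" + nta.getShift() + " rawValue=" + nta.getRawValue());
            }
            ts.end();
        }
    }
}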
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java index 03c14ee1a45..8c25713ce3d 100755 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.mapper.externalvalues; -import com.spatial4j.core.shape.Point; +import org.locationtech.spatial4j.shape.Point; import org.apache.lucene.document.Field; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java index 558e3bc83fb..9d6236234af 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java @@ -56,7 +56,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { } public void testExternalValues() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); MapperRegistry mapperRegistry = new MapperRegistry( @@ -101,7 +101,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { } public void testExternalValuesWithMultifield() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); Map<String, Mapper.TypeParser> mapperParsers = new HashMap<>(); @@ -159,7 +159,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase { } public void testExternalValuesWithMultifieldTwoLevels() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); Map<String, Mapper.TypeParser> mapperParsers = new HashMap<>(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java index ed6c574a865..6b9282e2704 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java @@ -66,7 +66,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject() .endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -96,7 +96,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .field("geohash", 
true).endObject().endObject() .endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -116,7 +116,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true) .field("geohash", true).endObject().endObject().endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -136,7 +136,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true) .field("geohash", true).endObject().endObject().endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -156,7 +156,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true) .endObject().endObject().endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -172,7 +172,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { } public void testNormalizeLatLonValuesDefault() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); // default to normalize XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); @@ -222,7 +222,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { } public void testValidateLatLonValues() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, 
Version.CURRENT); XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true); if (version.before(Version.V_2_2_0)) { @@ -285,7 +285,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { } public void testNoValidateLatLonValues() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true); if (version.before(Version.V_2_2_0)) { @@ -332,7 +332,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true) .field("store", true).endObject().endObject().endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -359,7 +359,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true) .field("store", true).endObject().endObject().endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -395,7 +395,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true) .endObject().endObject().endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -419,7 +419,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", true).endObject().endObject() .endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper 
defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -445,7 +445,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true) .field("store", true).endObject().endObject().endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -481,7 +481,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject() .endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -506,7 +506,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .startObject("point").field("match", "point*").startObject("mapping").field("type", "geo_point") .field("lat_lon", true).endObject().endObject().endObject().endArray().endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -530,7 +530,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true) .field("store", true).endObject().endObject().endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -556,7 +556,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true) .field("store", true).endObject().endObject().endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = 
Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -699,7 +699,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .endObject().endObject().endObject().string(); // create index and add a test point (dr5regy6rc6z) - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("test").setSettings(settings) .addMapping("pin", mapping); @@ -724,7 +724,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .endObject().endObject().endObject().endObject().string(); // create index and add a test point (dr5regy6rc6z) - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("test").setSettings(settings) .addMapping("pin", mapping); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java index 5de6c517ab2..bd23817ba50 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java @@ -57,7 +57,7 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false) .endObject().endObject().endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -81,7 +81,7 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false).endObject().endObject() .endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -105,7 +105,7 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true) 
.endObject().endObject().endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -126,7 +126,7 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true) .field("geohash_precision", 10).endObject().endObject().endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("point"); @@ -140,7 +140,7 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true).field("geohash_precision", "5m").endObject().endObject() .endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("point"); @@ -154,7 +154,7 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase { .startObject("properties").startObject("point").field("type", "geo_point").endObject().endObject() .endObject().endObject().string(); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); DocumentMapper defaultMapper = createIndex("test", settings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTests.java b/core/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTests.java index d171430dfff..05677d0ed8f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTests.java @@ -64,7 +64,7 @@ public class DoubleIndexingDocTests extends ESSingleNodeTestCase { writer.addDocument(doc.rootDoc()); writer.addDocument(doc.rootDoc()); - IndexReader reader = DirectoryReader.open(writer, true); + IndexReader reader = DirectoryReader.open(writer); IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs 
= searcher.search(mapper.mappers().smartNameFieldMapper("field1").fieldType().termQuery("value1", null), 10); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTests.java b/core/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTests.java index 0cd6fa0e1c9..9923846da0e 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTests.java @@ -76,7 +76,7 @@ public class StoredNumericValuesTests extends ESSingleNodeTestCase { // Indexing a doc in the old way FieldType fieldType = new FieldType(); fieldType.setStored(true); - fieldType.setNumericType(FieldType.NumericType.INT); + fieldType.setNumericType(FieldType.LegacyNumericType.INT); Document doc2 = new Document(); doc2.add(new StoredField("field1", new BytesRef(Numbers.intToBytes(1)))); doc2.add(new StoredField("field2", new BytesRef(Numbers.floatToBytes(1.1f)))); @@ -85,7 +85,7 @@ public class StoredNumericValuesTests extends ESSingleNodeTestCase { doc2.add(new StoredField("field3", new BytesRef(Numbers.longToBytes(3L)))); writer.addDocument(doc2); - DirectoryReader reader = DirectoryReader.open(writer, true); + DirectoryReader reader = DirectoryReader.open(writer); IndexSearcher searcher = new IndexSearcher(reader); Set<String> fields = new HashSet<>(Arrays.asList("field1", "field2", "field3")); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationIT.java index 6a82052bfa8..a1f6929fade 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationIT.java @@ -112,7 +112,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase { assertThat(mappingMetaData, not(nullValue())); Map<String, Object> mappingSource = mappingMetaData.sourceAsMap(); Map<String, Object> aField = ((Map<String, Object>) XContentMapValues.extractValue("properties.a", mappingSource)); - logger.info("Keys: " + aField.keySet()); + logger.info("Keys: {}", aField.keySet()); assertThat(aField.size(), equalTo(2)); assertThat(aField.get("type").toString(), equalTo("geo_point")); assertThat(aField.get("fields"), notNullValue()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java b/core/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java index bf21f2fd6d3..c10ccd14262 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.mapper.numeric; -import org.apache.lucene.analysis.NumericTokenStream; +import org.apache.lucene.analysis.LegacyNumericTokenStream; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.index.DocValuesType; @@ -623,8 +623,8 @@ public class SimpleNumericTests extends ESSingleNodeTestCase { // check the tokenstream actually used by the indexer TokenStream ts = field.tokenStream(null, null); - assertThat(ts, instanceOf(NumericTokenStream.class)); - assertEquals(expected, ((NumericTokenStream)ts).getPrecisionStep()); + assertThat(ts, instanceOf(LegacyNumericTokenStream.class)); + assertEquals(expected, ((LegacyNumericTokenStream)ts).getPrecisionStep()); } public void 
testTermVectorsBackCompat() throws Exception { @@ -684,4 +684,20 @@ public class SimpleNumericTests extends ESSingleNodeTestCase { parser = createIndex("index2-" + type, oldIndexSettings).mapperService().documentMapperParser(); parser.parse("type", new CompressedXContent(mappingWithTV)); // no exception } + + public void testRejectNorms() throws IOException { + // not supported as of 5.0 + for (String type : Arrays.asList("byte", "short", "integer", "long", "float", "double")) { + DocumentMapperParser parser = createIndex("index-" + type).mapperService().documentMapperParser(); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("foo") + .field("type", type) + .field("norms", random().nextBoolean()) + .endObject() + .endObject().endObject().endObject().string(); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> parser.parse("type", new CompressedXContent(mapping))); + assertThat(e.getMessage(), containsString("Mapping definition for [foo] has unsupported parameters: [norms")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java index 96d5559f457..907616712a2 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java @@ -28,32 +28,28 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import static org.hamcrest.Matchers.containsString; -/** - */ public class SimpleObjectMappingTests extends ESSingleNodeTestCase { public void testDifferentInnerObjectTokenFailure() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .endObject().endObject().string(); DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - try { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { defaultMapper.parse("test", "type", "1", new BytesArray(" {\n" + - " \"object\": {\n" + - " \"array\":[\n" + - " {\n" + - " \"object\": { \"value\": \"value\" }\n" + - " },\n" + - " {\n" + - " \"object\":\"value\"\n" + - " }\n" + - " ]\n" + - " },\n" + - " \"value\":\"value\"\n" + - " }")); - fail(); - } catch (MapperParsingException e) { - // all is well - } + " \"object\": {\n" + + " \"array\":[\n" + + " {\n" + + " \"object\": { \"value\": \"value\" }\n" + + " },\n" + + " {\n" + + " \"object\":\"value\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"value\":\"value\"\n" + + " }")); + }); + assertTrue(e.getMessage(), e.getMessage().contains("different type")); } public void testEmptyArrayProperties() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java index 86c67db219f..8007e624836 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java @@ -564,7 +564,7 @@ public class SimpleStringMappingTests extends ESSingleNodeTestCase { mapperService.merge("type", new CompressedXContent(updatedMapping), MapperService.MergeReason.MAPPING_UPDATE, false); fail(); } catch (IllegalArgumentException e) { - 
assertThat(e.getMessage(), containsString("different [omit_norms]")); + assertThat(e.getMessage(), containsString("different [norms]")); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java index ed58bb63b65..0a8d75d42f0 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java @@ -182,7 +182,7 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { IndexRequest request = new IndexRequest("test", "type", "1").source(doc); request.process(metaData, mappingMetaData, true, "test"); assertThat(request.timestamp(), notNullValue()); - assertThat(request.timestamp(), is(MappingMetaData.Timestamp.parseStringTimestamp("1970-01-01", Joda.forPattern("YYYY-MM-dd"), Version.CURRENT))); + assertThat(request.timestamp(), is(MappingMetaData.Timestamp.parseStringTimestamp("1970-01-01", Joda.forPattern("YYYY-MM-dd")))); } // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null] @@ -414,27 +414,11 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { assertThat(request.timestamp(), is("1433239200000")); } - public void testThatIndicesBefore2xMustSupportUnixTimestampsInAnyDateFormat() throws Exception { + public void testThatIndicesAfter2_0DontSupportUnixTimestampsInAnyDateFormat() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_timestamp").field("enabled", true).field("format", "dateOptionalTime").endObject() .endObject().endObject().string(); - BytesReference source = XContentFactory.jsonBuilder().startObject().field("field", "value").endObject().bytes(); - - // - // test with older versions - Settings oldSettings = settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersionBetween(random(), Version.V_0_90_0, Version.V_1_6_0)).build(); - DocumentMapper docMapper = createIndex("old-index", oldSettings).mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - - MetaData metaData = client().admin().cluster().prepareState().get().getState().getMetaData(); - - // both index request are successfully processed - IndexRequest oldIndexDateIndexRequest = new IndexRequest("old-index", "type", "1").source(source).timestamp("1970-01-01"); - oldIndexDateIndexRequest.process(metaData, new MappingMetaData(docMapper), true, "old-index"); - IndexRequest oldIndexTimestampIndexRequest = new IndexRequest("old-index", "type", "1").source(source).timestamp("1234567890"); - oldIndexTimestampIndexRequest.process(metaData, new MappingMetaData(docMapper), true, "old-index"); - - // // test with 2.x DocumentMapper currentMapper = createIndex("new-index").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); MetaData newMetaData = client().admin().cluster().prepareState().get().getState().getMetaData(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java index a3d6a87c43f..600f84b5f5f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java @@ -49,7 +49,7 @@ public class 
UpdateMappingOnClusterIT extends ESIntegTestCase { String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/update/all_mapping_create_index.json"); String mappingUpdate = copyToStringFromClasspath("/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json"); String[] errorMessage = { - "[_all] has different [omit_norms] values", + "[_all] has different [norms] values", "[_all] has different [store] values", "[_all] has different [store_term_vector] values", "[_all] has different [store_term_vector_offsets] values", diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java index ab0182aa0ef..2e2f5f2446f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java @@ -256,7 +256,7 @@ public class UpdateMappingTests extends ESSingleNodeTestCase { } public void testTimestampParsing() throws IOException { - IndexService indexService = createIndex("test", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build()); + IndexService indexService = createIndex("test"); XContentBuilder indexMapping = XContentFactory.jsonBuilder(); boolean enabled = randomBoolean(); indexMapping.startObject() diff --git a/core/src/test/java/org/elasticsearch/index/percolator/PercolatorFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/percolator/PercolatorFieldMapperTests.java index 074e64f8232..c803cc9624a 100644 --- a/core/src/test/java/org/elasticsearch/index/percolator/PercolatorFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/percolator/PercolatorFieldMapperTests.java @@ -26,13 +26,14 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.percolator.PercolatorService; +import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.test.ESSingleNodeTestCase; import org.junit.Before; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { @@ -48,28 +49,40 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { .endObject().endObject().string(); mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE, true); - String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(PercolatorService.TYPE_NAME) + String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(PercolatorFieldMapper.TYPE_NAME) .startObject("properties").startObject("query").field("type", "percolator").endObject().endObject() .endObject().endObject().string(); - mapperService.merge(PercolatorService.TYPE_NAME, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true); + mapperService.merge(PercolatorFieldMapper.TYPE_NAME, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true); } public void testPercolatorFieldMapper() throws Exception { - ParsedDocument doc = 
mapperService.documentMapper(PercolatorService.TYPE_NAME).parse("test", PercolatorService.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject() + ParsedDocument doc = mapperService.documentMapper(PercolatorFieldMapper.TYPE_NAME).parse("test", PercolatorFieldMapper.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject() .field("query", termQuery("field", "value")) .endObject().bytes()); assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME).length, equalTo(1)); assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME)[0].binaryValue().utf8ToString(), equalTo("field\0value")); + assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.QUERY_BUILDER_FULL_FIELD_NAME).length, equalTo(1)); } + public void testPercolatorFieldMapperUnMappedField() throws Exception { + MapperParsingException exception = expectThrows(MapperParsingException.class, () -> { + mapperService.documentMapper(PercolatorFieldMapper.TYPE_NAME).parse("test", PercolatorFieldMapper.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject() + .field("query", termQuery("unmapped_field", "value")) + .endObject().bytes()); + }); + assertThat(exception.getCause(), instanceOf(QueryShardException.class)); + assertThat(exception.getCause().getMessage(), equalTo("No field mapping can be found for the field with name [unmapped_field]")); + } + + public void testPercolatorFieldMapper_noQuery() throws Exception { - ParsedDocument doc = mapperService.documentMapper(PercolatorService.TYPE_NAME).parse("test", PercolatorService.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject() + ParsedDocument doc = mapperService.documentMapper(PercolatorFieldMapper.TYPE_NAME).parse("test", PercolatorFieldMapper.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject() .endObject().bytes()); assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME).length, equalTo(0)); try { - mapperService.documentMapper(PercolatorService.TYPE_NAME).parse("test", PercolatorService.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject() + mapperService.documentMapper(PercolatorFieldMapper.TYPE_NAME).parse("test", PercolatorFieldMapper.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject() .nullField("query") .endObject().bytes()); } catch (MapperParsingException e) { @@ -81,11 +94,11 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { IndexService indexService = createIndex("test1", Settings.EMPTY); MapperService mapperService = indexService.mapperService(); - String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(PercolatorService.TYPE_NAME) + String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(PercolatorFieldMapper.TYPE_NAME) .startObject("properties").startObject("query").field("type", "percolator").field("index", "no").endObject().endObject() .endObject().endObject().string(); try { - mapperService.merge(PercolatorService.TYPE_NAME, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true); + mapperService.merge(PercolatorFieldMapper.TYPE_NAME, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true); fail("MapperParsingException expected"); } catch (MapperParsingException e) { assertThat(e.getMessage(), equalTo("Mapping definition for [query] has unsupported parameters: [index : no]")); diff --git a/core/src/test/java/org/elasticsearch/index/percolator/PercolatorHighlightSubFetchPhaseTests.java 
b/core/src/test/java/org/elasticsearch/index/percolator/PercolatorHighlightSubFetchPhaseTests.java new file mode 100644 index 00000000000..5e37f1ce6e8 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/percolator/PercolatorHighlightSubFetchPhaseTests.java @@ -0,0 +1,85 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.percolator; + +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.index.query.PercolatorQuery; +import org.elasticsearch.search.highlight.SearchContextHighlight; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.test.ESTestCase; +import org.mockito.Mockito; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase { + + public void testHitsExecutionNeeded() { + PercolatorQuery percolatorQuery = new PercolatorQuery.Builder("", ctx -> null, new BytesArray("{}"), + Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery()) + .build(); + + PercolatorHighlightSubFetchPhase subFetchPhase = new PercolatorHighlightSubFetchPhase(null); + SearchContext searchContext = Mockito.mock(SearchContext.class); + Mockito.when(searchContext.highlight()).thenReturn(new SearchContextHighlight(Collections.emptyList())); + Mockito.when(searchContext.query()).thenReturn(new MatchAllDocsQuery()); + + assertThat(subFetchPhase.hitsExecutionNeeded(searchContext), is(false)); + IllegalStateException exception = expectThrows(IllegalStateException.class, + () -> subFetchPhase.hitsExecute(searchContext, null)); + assertThat(exception.getMessage(), equalTo("couldn't locate percolator query")); + + Mockito.when(searchContext.query()).thenReturn(percolatorQuery); + assertThat(subFetchPhase.hitsExecutionNeeded(searchContext), is(true)); + } + + public void testLocatePercolatorQuery() { + PercolatorQuery percolatorQuery = new PercolatorQuery.Builder("", ctx -> null, new BytesArray("{}"), + Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery()) + .build(); + + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(new MatchAllDocsQuery()), nullValue()); + BooleanQuery.Builder bq = new BooleanQuery.Builder(); + bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER); + 
assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()), nullValue()); + bq.add(percolatorQuery, BooleanClause.Occur.FILTER); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()), sameInstance(percolatorQuery)); + + ConstantScoreQuery constantScoreQuery = new ConstantScoreQuery(new MatchAllDocsQuery()); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(constantScoreQuery), nullValue()); + constantScoreQuery = new ConstantScoreQuery(percolatorQuery); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(constantScoreQuery), sameInstance(percolatorQuery)); + + BoostQuery boostQuery = new BoostQuery(new MatchAllDocsQuery(), 1f); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(boostQuery), nullValue()); + boostQuery = new BoostQuery(percolatorQuery, 1f); + assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(boostQuery), sameInstance(percolatorQuery)); + } + +} diff --git a/core/src/test/java/org/elasticsearch/index/percolator/PercolatorQueryCacheTests.java b/core/src/test/java/org/elasticsearch/index/percolator/PercolatorQueryCacheTests.java new file mode 100644 index 00000000000..4b0e8dd7089 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/percolator/PercolatorQueryCacheTests.java @@ -0,0 +1,352 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.percolator; + +import org.apache.lucene.analysis.MockAnalyzer; +import org.apache.lucene.document.BinaryDocValuesField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.TieredMergePolicy; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.WildcardQuery; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexWarmer; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.internal.SourceFieldMapper; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.index.query.BoolQueryParser; +import org.elasticsearch.index.query.PercolatorQuery; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryParser; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.TermQueryParser; +import org.elasticsearch.index.query.WildcardQueryParser; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.index.warmer.ShardIndexWarmerService; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class PercolatorQueryCacheTests extends ESTestCase { + + private QueryShardContext queryShardContext; + private PercolatorQueryCache 
cache; + + void initialize(Object... fields) throws IOException { + Settings settings = Settings.settingsBuilder() + .put("node.name", PercolatorQueryCacheTests.class.toString()) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .build(); + + Map<String, QueryParser<?>> queryParsers = new HashMap<>(); + queryParsers.put("term", new TermQueryParser()); + queryParsers.put("wildcard", new WildcardQueryParser()); + queryParsers.put("bool", new BoolQueryParser()); + IndicesQueriesRegistry indicesQueriesRegistry = new IndicesQueriesRegistry(settings, queryParsers); + + Settings indexSettings = Settings.settingsBuilder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(new Index("_index", ClusterState.UNKNOWN_UUID), indexSettings); + SimilarityService similarityService = new SimilarityService(idxSettings, Collections.emptyMap()); + AnalysisService analysisService = new AnalysisRegistry(null, new Environment(settings)).build(idxSettings); + MapperRegistry mapperRegistry = new IndicesModule().getMapperRegistry(); + MapperService mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry, + () -> queryShardContext); + mapperService.merge("type", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("type", fields).string()), + MapperService.MergeReason.MAPPING_UPDATE, false); + cache = new PercolatorQueryCache(idxSettings, () -> queryShardContext); + queryShardContext = new QueryShardContext(idxSettings, null, null, mapperService, similarityService, null, + indicesQueriesRegistry, cache); + } + + public void testLoadQueries() throws Exception { + Directory directory = newDirectory(); + IndexWriter indexWriter = new IndexWriter( + directory, + newIndexWriterConfig(new MockAnalyzer(random())) + ); + + boolean legacyFormat = randomBoolean(); + Version version = legacyFormat ?
Version.V_2_0_0 : Version.CURRENT; + + storeQuery("0", indexWriter, termQuery("field1", "value1"), true, legacyFormat); + storeQuery("1", indexWriter, wildcardQuery("field1", "v*"), true, legacyFormat); + storeQuery("2", indexWriter, boolQuery().must(termQuery("field1", "value1")).must(termQuery("field2", "value2")), + true, legacyFormat); + // dummy docs should be skipped during loading: + Document doc = new Document(); + doc.add(new StringField("dummy", "value", Field.Store.YES)); + indexWriter.addDocument(doc); + storeQuery("4", indexWriter, termQuery("field2", "value2"), true, legacyFormat); + // only documents of the .percolator type should be loaded: + storeQuery("5", indexWriter, termQuery("field2", "value2"), false, legacyFormat); + storeQuery("6", indexWriter, termQuery("field3", "value3"), true, legacyFormat); + indexWriter.forceMerge(1); + + // also include queries for percolator docs marked as deleted: + indexWriter.deleteDocuments(new Term("id", "6")); + indexWriter.close(); + + ShardId shardId = new ShardId("_index", ClusterState.UNKNOWN_UUID, 0); + IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), shardId); + assertThat(indexReader.leaves().size(), equalTo(1)); + assertThat(indexReader.numDeletedDocs(), equalTo(1)); + assertThat(indexReader.maxDoc(), equalTo(7)); + + initialize("field1", "type=keyword", "field2", "type=keyword", "field3", "type=keyword"); + + PercolatorQueryCache.QueriesLeaf leaf = cache.loadQueries(indexReader.leaves().get(0), version); + assertThat(leaf.queries.size(), equalTo(5)); + assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("field1", "value1")))); + assertThat(leaf.getQuery(1), equalTo(new WildcardQuery(new Term("field1", "v*")))); + assertThat(leaf.getQuery(2), equalTo(new BooleanQuery.Builder() + .add(new TermQuery(new Term("field1", "value1")), BooleanClause.Occur.MUST) + .add(new TermQuery(new Term("field2", "value2")), BooleanClause.Occur.MUST) + .build() + )); + assertThat(leaf.getQuery(4), equalTo(new TermQuery(new Term("field2", "value2")))); + assertThat(leaf.getQuery(6), equalTo(new TermQuery(new Term("field3", "value3")))); + + indexReader.close(); + directory.close(); + } + + public void testGetQueries() throws Exception { + Directory directory = newDirectory(); + IndexWriter indexWriter = new IndexWriter( + directory, + newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE) + ); + + storeQuery("0", indexWriter, termQuery("a", "0"), true, false); + storeQuery("1", indexWriter, termQuery("a", "1"), true, false); + storeQuery("2", indexWriter, termQuery("a", "2"), true, false); + indexWriter.flush(); + storeQuery("3", indexWriter, termQuery("a", "3"), true, false); + storeQuery("4", indexWriter, termQuery("a", "4"), true, false); + storeQuery("5", indexWriter, termQuery("a", "5"), true, false); + indexWriter.flush(); + storeQuery("6", indexWriter, termQuery("a", "6"), true, false); + storeQuery("7", indexWriter, termQuery("a", "7"), true, false); + storeQuery("8", indexWriter, termQuery("a", "8"), true, false); + indexWriter.flush(); + indexWriter.close(); + + ShardId shardId = new ShardId("_index", ClusterState.UNKNOWN_UUID, 0); + IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), shardId); + assertThat(indexReader.leaves().size(), equalTo(3)); + assertThat(indexReader.maxDoc(), equalTo(9)); + + initialize("a", "type=keyword"); + + try { + cache.getQueries(indexReader.leaves().get(0)); +
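// the queries are only loaded by the index warmer, so a lookup before warming must fail: +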
fail("IllegalStateException expected"); + } catch (IllegalStateException e) { + assertThat(e.getMessage(), equalTo("queries not loaded, queries should be have been preloaded during index warming...")); + } + + IndexShard indexShard = mockIndexShard(); + ThreadPool threadPool = mockThreadPool(); + IndexWarmer.Listener listener = cache.createListener(threadPool); + listener.warmNewReaders(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader))); + PercolatorQueryCacheStats stats = cache.getStats(shardId); + assertThat(stats.getNumQueries(), equalTo(9L)); + + PercolatorQuery.QueryRegistry.Leaf leaf = cache.getQueries(indexReader.leaves().get(0)); + assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0")))); + assertThat(leaf.getQuery(1), equalTo(new TermQuery(new Term("a", "1")))); + assertThat(leaf.getQuery(2), equalTo(new TermQuery(new Term("a", "2")))); + + leaf = cache.getQueries(indexReader.leaves().get(1)); + assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "3")))); + assertThat(leaf.getQuery(1), equalTo(new TermQuery(new Term("a", "4")))); + assertThat(leaf.getQuery(2), equalTo(new TermQuery(new Term("a", "5")))); + + leaf = cache.getQueries(indexReader.leaves().get(2)); + assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "6")))); + assertThat(leaf.getQuery(1), equalTo(new TermQuery(new Term("a", "7")))); + assertThat(leaf.getQuery(2), equalTo(new TermQuery(new Term("a", "8")))); + + indexReader.close(); + directory.close(); + } + + public void testInvalidateEntries() throws Exception { + Directory directory = newDirectory(); + IndexWriter indexWriter = new IndexWriter( + directory, + newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE) + ); + + storeQuery("0", indexWriter, termQuery("a", "0"), true, false); + indexWriter.flush(); + storeQuery("1", indexWriter, termQuery("a", "1"), true, false); + indexWriter.flush(); + storeQuery("2", indexWriter, termQuery("a", "2"), true, false); + indexWriter.flush(); + + ShardId shardId = new ShardId("_index", ClusterState.UNKNOWN_UUID, 0); + IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId); + assertThat(indexReader.leaves().size(), equalTo(3)); + assertThat(indexReader.maxDoc(), equalTo(3)); + + initialize("a", "type=keyword"); + + IndexShard indexShard = mockIndexShard(); + ThreadPool threadPool = mockThreadPool(); + IndexWarmer.Listener listener = cache.createListener(threadPool); + listener.warmNewReaders(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader))); + assertThat(cache.getStats(shardId).getNumQueries(), equalTo(3L)); + + PercolatorQuery.QueryRegistry.Leaf leaf = cache.getQueries(indexReader.leaves().get(0)); + assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0")))); + + leaf = cache.getQueries(indexReader.leaves().get(1)); + assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "1")))); + + leaf = cache.getQueries(indexReader.leaves().get(2)); + assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "2")))); + + // change merge policy, so that merges will actually happen: + indexWriter.getConfig().setMergePolicy(new TieredMergePolicy()); + indexWriter.deleteDocuments(new Term("id", "1")); + indexWriter.forceMergeDeletes(); + indexReader.close(); + indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId); + assertThat(indexReader.leaves().size(), equalTo(2)); + assertThat(indexReader.maxDoc(), 
equalTo(2)); + listener.warmNewReaders(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader))); + assertThat(cache.getStats(shardId).getNumQueries(), equalTo(2L)); + + leaf = cache.getQueries(indexReader.leaves().get(0)); + assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0")))); + + leaf = cache.getQueries(indexReader.leaves().get(1)); + assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "2")))); + + indexWriter.forceMerge(1); + indexReader.close(); + indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId); + assertThat(indexReader.leaves().size(), equalTo(1)); + assertThat(indexReader.maxDoc(), equalTo(2)); + listener.warmNewReaders(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader))); + assertThat(cache.getStats(shardId).getNumQueries(), equalTo(2L)); + + leaf = cache.getQueries(indexReader.leaves().get(0)); + assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0")))); + assertThat(leaf.getQuery(1), equalTo(new TermQuery(new Term("a", "2")))); + + indexWriter.close(); + indexReader.close(); + directory.close(); + } + + void storeQuery(String id, IndexWriter indexWriter, QueryBuilder queryBuilder, boolean typeField, boolean legacy) throws IOException { + Document doc = new Document(); + doc.add(new StringField("id", id, Field.Store.NO)); + if (typeField) { + doc.add(new StringField(TypeFieldMapper.NAME, PercolatorFieldMapper.TYPE_NAME, Field.Store.NO)); + } + if (legacy) { + BytesReference percolatorQuery = XContentFactory.jsonBuilder().startObject() + .field("query", queryBuilder) + .endObject().bytes(); + doc.add(new StoredField( + SourceFieldMapper.NAME, + percolatorQuery.array(), percolatorQuery.arrayOffset(), percolatorQuery.length()) + ); + } else { + BytesRef queryBuilderAsBytes = new BytesRef( + XContentFactory.contentBuilder(PercolatorQueryCache.QUERY_BUILDER_CONTENT_TYPE).value(queryBuilder).bytes().toBytes() + ); + doc.add(new BinaryDocValuesField(PercolatorFieldMapper.QUERY_BUILDER_FULL_FIELD_NAME, queryBuilderAsBytes)); + } + indexWriter.addDocument(doc); + } + + IndexShard mockIndexShard() { + IndexShard indexShard = mock(IndexShard.class); + ShardIndexWarmerService shardIndexWarmerService = mock(ShardIndexWarmerService.class); + when(shardIndexWarmerService.logger()).thenReturn(logger); + when(indexShard.warmerService()).thenReturn(shardIndexWarmerService); + IndexSettings indexSettings = new IndexSettings( + IndexMetaData.builder("_index").settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + ).build(), + Settings.EMPTY + ); + when(indexShard.indexSettings()).thenReturn(indexSettings); + return indexShard; + } + + ThreadPool mockThreadPool() { + ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.executor(anyString())).thenReturn(Runnable::run); + return threadPool; + } + +} diff --git a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java index 53594f43080..f92ef2d3fbe 100644 --- a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java @@ -22,8 +22,6 @@ package org.elasticsearch.index.query; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; import 
com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.core.io.JsonStringEncoder; - -import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -38,10 +36,10 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesArray; @@ -75,6 +73,7 @@ import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.query.support.QueryParsers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; @@ -100,7 +99,6 @@ import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.TestSearchContext; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolModule; import org.joda.time.DateTime; @@ -123,6 +121,8 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutionException; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; @@ -140,11 +140,12 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> protected static final String DATE_FIELD_NAME = "mapped_date"; protected static final String OBJECT_FIELD_NAME = "mapped_object"; protected static final String GEO_POINT_FIELD_NAME = "mapped_geo_point"; + protected static final String GEO_POINT_FIELD_MAPPING = "type=geo_point,lat_lon=true,geohash=true,geohash_prefix=true"; protected static final String GEO_SHAPE_FIELD_NAME = "mapped_geo_shape"; - protected static final String[] MAPPED_FIELD_NAMES = new String[] { STRING_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME, - BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, GEO_SHAPE_FIELD_NAME }; - protected static final String[] MAPPED_LEAF_FIELD_NAMES = new String[] { STRING_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME, - BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, GEO_POINT_FIELD_NAME }; + protected static final String[] MAPPED_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME, + BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, GEO_SHAPE_FIELD_NAME}; + protected static final String[] MAPPED_LEAF_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME, + BOOLEAN_FIELD_NAME, DATE_FIELD_NAME,
GEO_POINT_FIELD_NAME}; private static final int NUMBER_OF_TESTQUERIES = 20; private static Injector injector; @@ -193,11 +194,13 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> .build(); Settings indexSettings = Settings.settingsBuilder() .put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); + final ThreadPool threadPool = new ThreadPool(settings); index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_"); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings); - final TestClusterService clusterService = new TestClusterService(); - clusterService.setState(new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder().put( + ClusterService clusterService = createClusterService(threadPool); + setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder().put( new IndexMetaData.Builder(index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0)))); + SettingsModule settingsModule = new SettingsModule(settings); settingsModule.registerSetting(InternalSettingsPlugin.VERSION_CREATED); final Client proxy = (Client) Proxy.newProxyInstance( @@ -209,10 +212,10 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> @Override protected void configure() { Settings settings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - // no file watching, so we don't need a ResourceWatcherService - .put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false) - .build(); + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + // no file watching, so we don't need a ResourceWatcherService + .put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false) + .build(); MockScriptEngine mockScriptEngine = new MockScriptEngine(); Multibinder<ScriptEngineService> multibinder = Multibinder.newSetBinder(binder(), ScriptEngineService.class); multibinder.addBinding().toInstance(mockScriptEngine); @@ -228,7 +231,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> try { ScriptService scriptService = new ScriptService(settings, new Environment(settings), engines, null, scriptEngineRegistry, scriptContextRegistry, scriptSettings); bind(ScriptService.class).toInstance(scriptService); - } catch(IOException e) { + } catch (IOException e) { throw new IllegalStateException("error while binding ScriptService", e); } } @@ -237,7 +240,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> injector = new ModulesBuilder().add( new EnvironmentModule(new Environment(settings)), settingsModule, - new ThreadPoolModule(new ThreadPool(settings)), + new ThreadPoolModule(threadPool), new IndicesModule() { @Override public void configure() { @@ -252,6 +255,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> protected void configureSearch() { // Skip me } + @Override protected void configureSuggesters() { // Skip me @@ -286,8 +290,9 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> } }); + PercolatorQueryCache percolatorQueryCache = new PercolatorQueryCache(idxSettings, () -> queryShardContext); indicesQueriesRegistry = injector.getInstance(IndicesQueriesRegistry.class); - queryShardContext = new QueryShardContext(idxSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService, scriptService, indicesQueriesRegistry); + queryShardContext = new QueryShardContext(idxSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService, scriptService, indicesQueriesRegistry, percolatorQueryCache); //create some random type with some default field, those types will stick
around for all of the subclasses currentTypes = new String[randomIntBetween(0, 5)]; for (int i = 0; i < currentTypes.length; i++) { @@ -300,12 +305,12 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object", - GEO_POINT_FIELD_NAME, "type=geo_point,lat_lon=true,geohash=true,geohash_prefix=true", + GEO_POINT_FIELD_NAME, GEO_POINT_FIELD_MAPPING, GEO_SHAPE_FIELD_NAME, "type=geo_shape" ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); // also add mappings for two inner field in the object field - mapperService.merge(type, new CompressedXContent("{\"properties\":{\""+OBJECT_FIELD_NAME+"\":{\"type\":\"object\"," - + "\"properties\":{\""+DATE_FIELD_NAME+"\":{\"type\":\"date\"},\""+INT_FIELD_NAME+"\":{\"type\":\"integer\"}}}}}"), + mapperService.merge(type, new CompressedXContent("{\"properties\":{\"" + OBJECT_FIELD_NAME + "\":{\"type\":\"object\"," + + "\"properties\":{\"" + DATE_FIELD_NAME + "\":{\"type\":\"date\"},\"" + INT_FIELD_NAME + "\":{\"type\":\"integer\"}}}}}"), MapperService.MergeReason.MAPPING_UPDATE, false); currentTypes[i] = type; } @@ -314,6 +319,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> @AfterClass public static void afterClass() throws Exception { + injector.getInstance(ClusterService.class).close(); terminate(injector.getInstance(ThreadPool.class)); injector = null; index = null; @@ -340,6 +346,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> @After public void afterTest() { + queryShardContext.setFieldStatsProvider(null); clientInvocationHandler.delegate = null; SearchContext.removeCurrent(); } @@ -418,9 +425,9 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> // we'd like to see the offending field name here assertThat(e.getMessage(), containsString("bogusField")); } - } + } - /** + /** * Test that adding additional object into otherwise correct query string * should always trigger some kind of Parsing Exception.
*/ @@ -689,7 +696,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> return new QueryParser() { @Override public String[] names() { - return new String[] {EmptyQueryBuilder.NAME}; + return new String[]{EmptyQueryBuilder.NAME}; } @Override @@ -713,7 +720,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) { QueryBuilder prototype = queryParser(query.getName()).getBuilderPrototype(); @SuppressWarnings("unchecked") - QB secondQuery = (QB)prototype.readFrom(in); + QB secondQuery = (QB) prototype.readFrom(in); return secondQuery; } } @@ -829,7 +836,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> } } else { if (randomBoolean()) { - types = new String[] { MetaData.ALL }; + types = new String[]{MetaData.ALL}; } else { types = new String[0]; } @@ -889,6 +896,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> private static class ClientInvocationHandler implements InvocationHandler { AbstractQueryTestCase<?> delegate; + @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { if (method.equals(Client.class.getMethod("get", GetRequest.class))) { @@ -899,12 +907,12 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> } }; } else if (method.equals(Client.class.getMethod("multiTermVectors", MultiTermVectorsRequest.class))) { - return new PlainActionFuture<MultiTermVectorsResponse>() { - @Override - public MultiTermVectorsResponse get() throws InterruptedException, ExecutionException { - return delegate.executeMultiTermVectors((MultiTermVectorsRequest) args[0]); - } - }; + return new PlainActionFuture<MultiTermVectorsResponse>() { + @Override + public MultiTermVectorsResponse get() throws InterruptedException, ExecutionException { + return delegate.executeMultiTermVectors((MultiTermVectorsRequest) args[0]); + } + }; } else if (method.equals(Object.class.getMethod("toString"))) { return "MockClient"; } @@ -949,8 +957,8 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> source.toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals( msg(expected, builder.string()), - expected.replaceAll("\\s+",""), - builder.string().replaceAll("\\s+","")); + expected.replaceAll("\\s+", ""), + builder.string().replaceAll("\\s+", "")); } private static String msg(String left, String right) { @@ -963,7 +971,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> } else { builder.append(">> ").append("until offset: ").append(i) .append(" [").append(left.charAt(i)).append(" vs.").append(right.charAt(i)) - .append("] [").append((int)left.charAt(i) ).append(" vs.").append((int)right.charAt(i)).append(']'); + .append("] [").append((int) left.charAt(i)).append(" vs.").append((int) right.charAt(i)).append(']'); return builder.toString(); } } @@ -972,7 +980,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> int rightEnd = Math.max(size, right.length()) - 1; builder.append(">> ").append("until offset: ").append(size) .append(" [").append(left.charAt(leftEnd)).append(" vs.").append(right.charAt(rightEnd)) - .append("] [").append((int)left.charAt(leftEnd)).append(" vs.").append((int)right.charAt(rightEnd)).append(']'); + .append("] [").append((int) left.charAt(leftEnd)).append(" vs.").append((int) right.charAt(rightEnd)).append(']'); return builder.toString(); } return ""; diff --git a/core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java index 832885b063a..5f26c0ce72e 100644 ---
a/core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java @@ -54,7 +54,7 @@ public class ConstantScoreQueryBuilderTests extends AbstractQueryTestCase assertThat(query, instanceOf(TermQuery.class)); TermQuery termQuery = (TermQuery) query; Term term = termQuery.getTerm(); - assertThat(term.field(), equalTo(queryBuilder.fieldName() + GeoPointFieldMapper.Names.GEOHASH_SUFFIX)); + assertThat(term.field(), equalTo(queryBuilder.fieldName() + "." + GeoPointFieldMapper.Names.GEOHASH)); String geohash = queryBuilder.geohash(); if (queryBuilder.precision() != null) { int len = Math.min(queryBuilder.precision(), geohash.length()); diff --git a/core/src/test/java/org/elasticsearch/index/query/HasChildQueryParserTests.java b/core/src/test/java/org/elasticsearch/index/query/HasChildQueryParserTests.java index 7be9a6c74ff..dc8ebc6c4ba 100644 --- a/core/src/test/java/org/elasticsearch/index/query/HasChildQueryParserTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/HasChildQueryParserTests.java @@ -27,22 +27,27 @@ import static org.hamcrest.Matchers.is; public class HasChildQueryParserTests extends ESTestCase { public void testMinFromString() { assertThat("fromString(min) != MIN", ScoreMode.Min, equalTo(HasChildQueryParser.parseScoreMode("min"))); + assertThat("min", equalTo(HasChildQueryParser.scoreModeAsString(ScoreMode.Min))); } public void testMaxFromString() { assertThat("fromString(max) != MAX", ScoreMode.Max, equalTo(HasChildQueryParser.parseScoreMode("max"))); + assertThat("max", equalTo(HasChildQueryParser.scoreModeAsString(ScoreMode.Max))); } public void testAvgFromString() { assertThat("fromString(avg) != AVG", ScoreMode.Avg, equalTo(HasChildQueryParser.parseScoreMode("avg"))); + assertThat("avg", equalTo(HasChildQueryParser.scoreModeAsString(ScoreMode.Avg))); } public void testSumFromString() { - assertThat("fromString(total) != SUM", ScoreMode.Total, equalTo(HasChildQueryParser.parseScoreMode("total"))); + assertThat("fromString(total) != SUM", ScoreMode.Total, equalTo(HasChildQueryParser.parseScoreMode("sum"))); + assertThat("sum", equalTo(HasChildQueryParser.scoreModeAsString(ScoreMode.Total))); } public void testNoneFromString() { assertThat("fromString(none) != NONE", ScoreMode.None, equalTo(HasChildQueryParser.parseScoreMode("none"))); + assertThat("none", equalTo(HasChildQueryParser.scoreModeAsString(ScoreMode.None))); } /** diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index 65fe9d13fd6..272adb3f2f8 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -23,8 +23,8 @@ import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -124,15 +124,15 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase numericRangeQuery = (NumericRangeQuery) query; + LegacyNumericRangeQuery 
numericRangeQuery = (LegacyNumericRangeQuery) query; assertTrue(numericRangeQuery.includesMin()); assertTrue(numericRangeQuery.includesMax()); diff --git a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index a4af84a8f79..238a186394d 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -25,9 +25,9 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -134,7 +134,7 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase { + + private String indexedDocumentIndex; + private String indexedDocumentType; + private String indexedDocumentId; + private String indexedDocumentRouting; + private String indexedDocumentPreference; + private Long indexedDocumentVersion; + private BytesReference documentSource; + + boolean indexedDocumentExists = true; + + @Override + protected PercolatorQueryBuilder doCreateTestQueryBuilder() { + return doCreateTestQueryBuilder(randomBoolean()); + } + + private PercolatorQueryBuilder doCreateTestQueryBuilder(boolean indexedDocument) { + String docType = randomAsciiOfLength(4); + documentSource = randomSource(); + if (indexedDocument) { + indexedDocumentIndex = randomAsciiOfLength(4); + indexedDocumentType = randomAsciiOfLength(4); + indexedDocumentId = randomAsciiOfLength(4); + indexedDocumentRouting = randomAsciiOfLength(4); + indexedDocumentPreference = randomAsciiOfLength(4); + indexedDocumentVersion = (long) randomIntBetween(0, Integer.MAX_VALUE); + return new PercolatorQueryBuilder(docType, indexedDocumentIndex, indexedDocumentType, indexedDocumentId, + indexedDocumentRouting, indexedDocumentPreference, indexedDocumentVersion); + } else { + return new PercolatorQueryBuilder(docType, documentSource); + } + } + + @Override + protected GetResponse executeGet(GetRequest getRequest) { + assertThat(getRequest.index(), Matchers.equalTo(indexedDocumentIndex)); + assertThat(getRequest.type(), Matchers.equalTo(indexedDocumentType)); + assertThat(getRequest.id(), Matchers.equalTo(indexedDocumentId)); + assertThat(getRequest.routing(), Matchers.equalTo(indexedDocumentRouting)); + assertThat(getRequest.preference(), Matchers.equalTo(indexedDocumentPreference)); + assertThat(getRequest.version(), Matchers.equalTo(indexedDocumentVersion)); + if (indexedDocumentExists) { + return new GetResponse( + new GetResult(indexedDocumentIndex, indexedDocumentType, indexedDocumentId, 0L, true, documentSource, + Collections.emptyMap()) + ); + } else { + return new GetResponse( + new GetResult(indexedDocumentIndex, indexedDocumentType, indexedDocumentId, -1, false, null, Collections.emptyMap()) + ); + } + } + + @Override + protected void doAssertLuceneQuery(PercolatorQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { + assertThat(query, Matchers.instanceOf(PercolatorQuery.class)); + PercolatorQuery percolatorQuery = 
(PercolatorQuery) query; + assertThat(percolatorQuery.getDocumentType(), Matchers.equalTo(queryBuilder.getDocumentType())); + assertThat(percolatorQuery.getDocumentSource(), Matchers.equalTo(documentSource)); + } + + @Override + public void testMustRewrite() throws IOException { + PercolatorQueryBuilder pqb = doCreateTestQueryBuilder(true); + try { + pqb.toQuery(queryShardContext()); + fail("IllegalStateException expected"); + } catch (IllegalStateException e) { + assertThat(e.getMessage(), equalTo("query builder must be rewritten first")); + } + QueryBuilder rewrite = pqb.rewrite(queryShardContext()); + PercolatorQueryBuilder expectedQueryBuilder = new PercolatorQueryBuilder(pqb.getDocumentType(), documentSource); + assertEquals(expectedQueryBuilder, rewrite); + } + + public void testIndexedDocumentDoesNotExist() throws IOException { + indexedDocumentExists = false; + PercolatorQueryBuilder pqb = doCreateTestQueryBuilder(true); + try { + pqb.rewrite(queryShardContext()); + fail("ResourceNotFoundException expected"); + } catch (ResourceNotFoundException e) { + String expectedString = "indexed document [" + indexedDocumentIndex + "/" + indexedDocumentType + "/" + + indexedDocumentId + "] couldn't be found"; + assertThat(e.getMessage(), equalTo(expectedString)); + } + } + + // override this test, because adding a bogus field to the document part is valid and would make the test fail + // (the document part represents the document being percolated and any key-value pair is allowed there) + public void testUnknownObjectException() throws IOException { + String validQuery = createTestQueryBuilder().toString(); + int endPos = validQuery.indexOf("document"); + if (endPos == -1) { + endPos = validQuery.length(); + } + assertThat(validQuery, containsString("{")); + for (int insertionPosition = 0; insertionPosition < endPos; insertionPosition++) { + if (validQuery.charAt(insertionPosition) == '{') { + String testQuery = validQuery.substring(0, insertionPosition) + "{ \"newField\" : " + + validQuery.substring(insertionPosition) + "}"; + try { + parseQuery(testQuery); + fail("some parsing exception expected for query: " + testQuery); + } catch (ParsingException | Script.ScriptParseException | ElasticsearchParseException e) { + // different kinds of exception wordings depending on location + // of mutation, so no simple asserts possible here + } catch (JsonParseException e) { + // mutation produced invalid json + } + } + } + } + + public void testRequiredParameters() { + try { + QueryBuilders.percolatorQuery(null, new BytesArray("{}")); + fail("IllegalArgumentException expected"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("[document_type] is a required argument")); + } + try { + QueryBuilders.percolatorQuery("_document_type", null); + fail("IllegalArgumentException expected"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("[document] is a required argument")); + } + try { + QueryBuilders.percolatorQuery(null, "_index", "_type", "_id", null, null, null); + fail("IllegalArgumentException expected"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("[document_type] is a required argument")); + } + try { + QueryBuilders.percolatorQuery("_document_type", null, "_type", "_id", null, null, null); + fail("IllegalArgumentException expected"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("[index] is a required argument")); + } + try { +
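// a null indexed document type must be rejected as well: +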
QueryBuilders.percolatorQuery("_document_type", "_index", null, "_id", null, null, null); + fail("IllegalArgumentException expected"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("[type] is a required argument")); + } + try { + QueryBuilders.percolatorQuery("_document_type", "_index", "_type", null, null, null, null); + fail("IllegalArgumentException expected"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("[id] is a required argument")); + } + } + + public void testFromJsonNoDocumentType() throws IOException { + try { + parseQuery("{\"percolator\" : { \"document\": {}}"); + fail("IllegalArgumentException expected"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("[percolator] query is missing required [document_type] parameter")); + } + } + + private static BytesReference randomSource() { + try { + XContentBuilder xContent = XContentFactory.jsonBuilder(); + xContent.map(RandomDocumentPicks.randomSource(random())); + return xContent.bytes(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorQueryTests.java b/core/src/test/java/org/elasticsearch/index/query/PercolatorQueryTests.java similarity index 83% rename from core/src/test/java/org/elasticsearch/percolator/PercolatorQueryTests.java rename to core/src/test/java/org/elasticsearch/index/query/PercolatorQueryTests.java index 170b0be30df..347802d3770 100644 --- a/core/src/test/java/org/elasticsearch/percolator/PercolatorQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/PercolatorQueryTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.percolator; +package org.elasticsearch.index.query; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; import org.apache.lucene.document.Field; @@ -44,11 +44,12 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.percolator.ExtractQueryTermsService; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.Before; @@ -75,13 +76,22 @@ public class PercolatorQueryTests extends ESTestCase { private Directory directory; private IndexWriter indexWriter; - private Map queries; + private Map queries; + private PercolatorQuery.QueryRegistry queryRegistry; private DirectoryReader directoryReader; @Before public void init() throws Exception { directory = newDirectory(); queries = new HashMap<>(); + queryRegistry = ctx -> docId -> { + try { + String val = ctx.reader().document(docId).get(UidFieldMapper.NAME); + return queries.get(Uid.createUid(val).id()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; IndexWriterConfig config = new IndexWriterConfig(new WhitespaceAnalyzer()); config.setMergePolicy(NoMergePolicy.INSTANCE); indexWriter = new IndexWriter(directory, config); @@ -127,8 +137,10 @@ public class PercolatorQueryTests extends ESTestCase { IndexSearcher percolateSearcher = memoryIndex.createSearcher(); PercolatorQuery.Builder builder = new 
PercolatorQuery.Builder( + "docType", + queryRegistry, + new BytesArray("{}"), percolateSearcher, - queries, new MatchAllDocsQuery() ); builder.extractQueryTermsQuery(EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME); @@ -142,37 +154,6 @@ public class PercolatorQueryTests extends ESTestCase { assertThat(topDocs.scoreDocs[4].doc, equalTo(7)); } - public void testWithScoring() throws Exception { - addPercolatorQuery("1", new TermQuery(new Term("field", "brown")), "field", "value1"); - - indexWriter.close(); - directoryReader = DirectoryReader.open(directory); - IndexSearcher shardSearcher = newSearcher(directoryReader); - - MemoryIndex memoryIndex = new MemoryIndex(); - memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer()); - IndexSearcher percolateSearcher = memoryIndex.createSearcher(); - - PercolatorQuery.Builder builder = new PercolatorQuery.Builder( - percolateSearcher, - queries, - new MatchAllDocsQuery() - ); - builder.extractQueryTermsQuery(EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME); - builder.setPercolateQuery(new TermQuery(new Term("field", "value1"))); - - PercolatorQuery percolatorQuery = builder.build(); - TopDocs topDocs = shardSearcher.search(percolatorQuery, 1); - assertThat(topDocs.totalHits, equalTo(1)); - assertThat(topDocs.scoreDocs.length, equalTo(1)); - assertThat(topDocs.scoreDocs[0].doc, equalTo(0)); - assertThat(topDocs.scoreDocs[0].score, not(1f)); - - Explanation explanation = shardSearcher.explain(percolatorQuery, 0); - assertThat(explanation.isMatch(), is(true)); - assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score)); - } - public void testDuel() throws Exception { int numQueries = scaledRandomIntBetween(32, 256); for (int i = 0; i < numQueries; i++) { @@ -201,8 +182,10 @@ public class PercolatorQueryTests extends ESTestCase { IndexSearcher percolateSearcher = memoryIndex.createSearcher(); PercolatorQuery.Builder builder1 = new PercolatorQuery.Builder( + "docType", + queryRegistry, + new BytesArray("{}"), percolateSearcher, - queries, new MatchAllDocsQuery() ); // enables the optimization that prevents queries from being evaluated that don't match @@ -210,8 +193,10 @@ public class PercolatorQueryTests extends ESTestCase { TopDocs topDocs1 = shardSearcher.search(builder1.build(), 10); PercolatorQuery.Builder builder2 = new PercolatorQuery.Builder( + "docType", + queryRegistry, + new BytesArray("{}"), percolateSearcher, - queries, new MatchAllDocsQuery() ); TopDocs topDocs2 = shardSearcher.search(builder2.build(), 10); @@ -225,10 +210,11 @@ public class PercolatorQueryTests extends ESTestCase { } void addPercolatorQuery(String id, Query query, String... 
extraFields) throws IOException { - queries.put(new BytesRef(id), query); + queries.put(id, query); ParseContext.Document document = new ParseContext.Document(); - ExtractQueryTermsService.extractQueryTerms(query, document, EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME, EXTRACTED_TERMS_FIELD_TYPE); - document.add(new StoredField(UidFieldMapper.NAME, Uid.createUid(PercolatorService.TYPE_NAME, id))); + ExtractQueryTermsService.extractQueryTerms(query, document, EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME, + EXTRACTED_TERMS_FIELD_TYPE); + document.add(new StoredField(UidFieldMapper.NAME, Uid.createUid(PercolatorFieldMapper.TYPE_NAME, id))); assert extraFields.length % 2 == 0; for (int i = 0; i < extraFields.length; i++) { document.add(new StringField(extraFields[i], extraFields[++i], Field.Store.NO)); diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java index 110c93d429c..f705db3a537 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java @@ -47,7 +47,7 @@ public class QueryShardContextTests extends ESTestCase { MapperService mapperService = mock(MapperService.class); when(mapperService.getIndexSettings()).thenReturn(indexSettings); QueryShardContext context = new QueryShardContext( - indexSettings, null, null, mapperService, null, null, null + indexSettings, null, null, mapperService, null, null, null, null ); context.setAllowUnmappedFields(false); diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 6a20bc78ab8..5a42331c52d 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -24,9 +24,9 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; +import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; @@ -301,7 +301,7 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase 0); Query query = queryStringQuery("12~0.2").defaultField(INT_FIELD_NAME).toQuery(createShardContext()); - NumericRangeQuery fuzzyQuery = (NumericRangeQuery) query; + LegacyNumericRangeQuery fuzzyQuery = (LegacyNumericRangeQuery) query; assertThat(fuzzyQuery.getMin().longValue(), equalTo(12L)); assertThat(fuzzyQuery.getMax().longValue(), equalTo(12L)); } diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index fbb708a5d97..30e32c92da2 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -19,14 +19,19 @@ package org.elasticsearch.index.query; -import org.apache.lucene.search.NumericRangeQuery; +import org.apache.lucene.search.LegacyNumericRangeQuery; 
import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.index.fieldstats.FieldStatsProvider; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; +import org.joda.time.chrono.ISOChronology; import java.io.IOException; import java.util.HashMap; @@ -38,6 +43,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.sameInstance; public class RangeQueryBuilderTests extends AbstractQueryTestCase { @@ -118,8 +124,8 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase 0); Query parsedQuery = rangeQuery(INT_FIELD_NAME).from(23).to(54).includeLower(true).includeUpper(false).toQuery(createShardContext()); // since age is automatically registered in data, we encode it as numeric - assertThat(parsedQuery, instanceOf(NumericRangeQuery.class)); - NumericRangeQuery rangeQuery = (NumericRangeQuery) parsedQuery; + assertThat(parsedQuery, instanceOf(LegacyNumericRangeQuery.class)); + LegacyNumericRangeQuery rangeQuery = (LegacyNumericRangeQuery) parsedQuery; assertThat(rangeQuery.getField(), equalTo(INT_FIELD_NAME)); assertThat(rangeQuery.getMin().intValue(), equalTo(23)); assertThat(rangeQuery.getMax().intValue(), equalTo(54)); @@ -220,15 +226,15 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase> FieldStats get(String field) throws IOException { + assertThat(field, equalTo(fieldName)); + return (FieldStats) new FieldStats.Long(randomLong(), randomLong(), randomLong(), randomLong(), shardMinValue, + shardMaxValue); + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, instanceOf(RangeQueryBuilder.class)); + RangeQueryBuilder rewrittenRange = (RangeQueryBuilder) rewritten; + assertThat(rewrittenRange.fieldName(), equalTo(fieldName)); + assertThat(rewrittenRange.from(), equalTo(shardMinValue)); + assertThat(rewrittenRange.to(), equalTo(shardMaxValue)); + assertThat(rewrittenRange.includeLower(), equalTo(true)); + assertThat(rewrittenRange.includeUpper(), equalTo(true)); + } + + public void testRewriteLongToMatchNone() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + int queryFromValue = randomIntBetween(-1000000, 1000000); + int queryToValue = randomIntBetween(queryFromValue, 2000000); + query.from((long) queryFromValue); + query.to((long) queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.DISJOINT; + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); + } + + public void 
testRewriteLongToSame() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + int queryFromValue = randomIntBetween(-1000000, 1000000); + int queryToValue = randomIntBetween(queryFromValue, 2000000); + query.from((long) queryFromValue); + query.to((long) queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.INTERSECTS; + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, sameInstance(query)); + } + + public void testRewriteDoubleToMatchAll() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + double queryFromValue = randomDoubleBetween(-1000000.0, 1000000.0, true); + double queryToValue = randomDoubleBetween(queryFromValue, 2000000, true); + double shardMinValue = randomDoubleBetween(queryFromValue, queryToValue, true); + double shardMaxValue = randomDoubleBetween(shardMinValue, queryToValue, true); + query.from(queryFromValue); + query.to(queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.WITHIN; + } + + @SuppressWarnings("unchecked") + @Override + public > FieldStats get(String field) throws IOException { + assertThat(field, equalTo(fieldName)); + return (FieldStats) new FieldStats.Double(randomLong(), randomLong(), randomLong(), randomLong(), shardMinValue, + shardMaxValue); + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, instanceOf(RangeQueryBuilder.class)); + RangeQueryBuilder rewrittenRange = (RangeQueryBuilder) rewritten; + assertThat(rewrittenRange.fieldName(), equalTo(fieldName)); + assertThat(rewrittenRange.from(), equalTo(shardMinValue)); + assertThat(rewrittenRange.to(), equalTo(shardMaxValue)); + assertThat(rewrittenRange.includeLower(), equalTo(true)); + assertThat(rewrittenRange.includeUpper(), equalTo(true)); + } + + public void testRewriteDoubleToMatchNone() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + double queryFromValue = randomDoubleBetween(-1000000, 1000000, true); + double queryToValue = randomDoubleBetween(queryFromValue, 2000000, true); + query.from(queryFromValue); + query.to(queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.DISJOINT; + } + }; + 
queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); + } + + public void testRewriteDoubleToSame() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + double queryFromValue = randomDoubleBetween(-1000000, 1000000, true); + double queryToValue = randomDoubleBetween(queryFromValue, 2000000, true); + query.from(queryFromValue); + query.to(queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.INTERSECTS; + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, sameInstance(query)); + } + + public void testRewriteFloatToMatchAll() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + float queryFromValue = (float) randomDoubleBetween(-1000000.0, 1000000.0, true); + float queryToValue = (float) randomDoubleBetween(queryFromValue, 2000000, true); + float shardMinValue = (float) randomDoubleBetween(queryFromValue, queryToValue, true); + float shardMaxValue = (float) randomDoubleBetween(shardMinValue, queryToValue, true); + query.from(queryFromValue); + query.to(queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.WITHIN; + } + + @SuppressWarnings("unchecked") + @Override + public > FieldStats get(String field) throws IOException { + assertThat(field, equalTo(fieldName)); + return (FieldStats) new FieldStats.Float(randomLong(), randomLong(), randomLong(), randomLong(), shardMinValue, + shardMaxValue); + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, instanceOf(RangeQueryBuilder.class)); + RangeQueryBuilder rewrittenRange = (RangeQueryBuilder) rewritten; + assertThat(rewrittenRange.fieldName(), equalTo(fieldName)); + assertThat(rewrittenRange.from(), equalTo(shardMinValue)); + assertThat(rewrittenRange.to(), equalTo(shardMaxValue)); + assertThat(rewrittenRange.includeLower(), equalTo(true)); + assertThat(rewrittenRange.includeUpper(), equalTo(true)); + } + + public void testRewriteFloatToMatchNone() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + float queryFromValue = (float) randomDoubleBetween(-1000000, 1000000, true); + float queryToValue = (float) randomDoubleBetween(queryFromValue, 2000000, true); + query.from(queryFromValue); + query.to(queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation 
isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.DISJOINT; + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); + } + + public void testRewriteFloatToSame() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + float queryFromValue = (float) randomDoubleBetween(-1000000, 1000000, true); + float queryToValue = (float) randomDoubleBetween(queryFromValue, 2000000, true); + query.from(queryFromValue); + query.to(queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.INTERSECTS; + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, sameInstance(query)); + } + + public void testRewriteTextToMatchAll() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + String queryFromValue = "damson"; + String queryToValue = "plum"; + String shardMinValue = "grape"; + String shardMaxValue = "orange"; + query.from(queryFromValue); + query.to(queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.WITHIN; + } + + @SuppressWarnings("unchecked") + @Override + public <T extends Comparable<T>> FieldStats<T> get(String field) throws IOException { + assertThat(field, equalTo(fieldName)); + return (FieldStats<T>) new FieldStats.Text(randomLong(), randomLong(), randomLong(), randomLong(), + new BytesRef(shardMinValue), new BytesRef(shardMaxValue)); + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, instanceOf(RangeQueryBuilder.class)); + RangeQueryBuilder rewrittenRange = (RangeQueryBuilder) rewritten; + assertThat(rewrittenRange.fieldName(), equalTo(fieldName)); + assertThat(rewrittenRange.from(), equalTo(shardMinValue)); + assertThat(rewrittenRange.to(), equalTo(shardMaxValue)); + assertThat(rewrittenRange.includeLower(), equalTo(true)); + assertThat(rewrittenRange.includeUpper(), equalTo(true)); + } + + public void testRewriteTextToMatchNone() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + String queryFromValue = "damson"; + String queryToValue = "plum"; + query.from(queryFromValue); + query.to(queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation isFieldWithinQuery(String fieldName, Object from, Object 
to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.DISJOINT; + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); + } + + public void testRewriteTextToSame() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + String queryFromValue = "damson"; + String queryToValue = "plum"; + query.from(queryFromValue); + query.to(queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.INTERSECTS; + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, sameInstance(query)); + } + + public void testRewriteDateToMatchAll() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + DateTime queryFromValue = new DateTime(2015, 1, 1, 0, 0, 0, ISOChronology.getInstanceUTC()); + DateTime queryToValue = new DateTime(2016, 1, 1, 0, 0, 0, ISOChronology.getInstanceUTC()); + DateTime shardMinValue = new DateTime(2015, 3, 1, 0, 0, 0, ISOChronology.getInstanceUTC()); + DateTime shardMaxValue = new DateTime(2015, 9, 1, 0, 0, 0, ISOChronology.getInstanceUTC()); + query.from(queryFromValue); + query.to(queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.WITHIN; + } + + @SuppressWarnings("unchecked") + @Override + public <T extends Comparable<T>> FieldStats<T> get(String field) throws IOException { + assertThat(field, equalTo(fieldName)); + return (FieldStats<T>) new FieldStats.Date(randomLong(), randomLong(), randomLong(), randomLong(), + shardMinValue.getMillis(), shardMaxValue.getMillis(), null); + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, instanceOf(RangeQueryBuilder.class)); + RangeQueryBuilder rewrittenRange = (RangeQueryBuilder) rewritten; + assertThat(rewrittenRange.fieldName(), equalTo(fieldName)); + assertThat(rewrittenRange.from(), equalTo(shardMinValue.getMillis())); + assertThat(rewrittenRange.to(), equalTo(shardMaxValue.getMillis())); + assertThat(rewrittenRange.includeLower(), equalTo(true)); + assertThat(rewrittenRange.includeUpper(), equalTo(true)); + } + + public void testRewriteDateToMatchNone() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + DateTime queryFromValue = new DateTime(2015, 1, 1, 0, 0, 0, ISOChronology.getInstanceUTC()); + DateTime queryToValue = new DateTime(2016, 1, 1, 0, 0, 0, ISOChronology.getInstanceUTC()); + query.from(queryFromValue); + 
query.to(queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.DISJOINT; + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); + } + + public void testRewriteDateToSame() throws IOException { + String fieldName = randomAsciiOfLengthBetween(1, 20); + RangeQueryBuilder query = new RangeQueryBuilder(fieldName); + DateTime queryFromValue = new DateTime(2015, 1, 1, 0, 0, 0, ISOChronology.getInstanceUTC()); + DateTime queryToValue = new DateTime(2016, 1, 1, 0, 0, 0, ISOChronology.getInstanceUTC()); + query.from(queryFromValue); + query.to(queryToValue); + QueryShardContext queryShardContext = queryShardContext(); + FieldStatsProvider fieldStatsProvider = new FieldStatsProvider(null, null) { + + @Override + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + return Relation.INTERSECTS; + } + }; + queryShardContext.setFieldStatsProvider(fieldStatsProvider); + QueryBuilder rewritten = query.rewrite(queryShardContext); + assertThat(rewritten, sameInstance(query)); + } } diff --git a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index c5cfa7ebd36..560ee1881f0 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -249,8 +249,7 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQueryStringBuilder> - if (getCurrentTypes().length > 0 || shardContext.indexVersionCreated().before(Version.V_1_4_0_Beta1)) { + if (getCurrentTypes().length > 0) { Query luceneQuery = queryBuilder.toQuery(shardContext); assertThat(luceneQuery, instanceOf(TermQuery.class)); TermQuery termQuery = (TermQuery) luceneQuery; diff --git a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java index 4ed78e3f5ff..4b6788d463f 100644 --- a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java @@ -276,7 +276,7 @@ public class FunctionScoreTests extends ESTestCase { d.add(new TextField("_uid", "1", Field.Store.YES)); w.addDocument(d); w.commit(); - reader = DirectoryReader.open(w, true); + reader = DirectoryReader.open(w); searcher = newSearcher(reader); } @@ -634,13 +634,11 @@ public class FunctionScoreTests extends ESTestCase { ScoreFunction otherFunciton = function == null ? new DummyScoreFunction(combineFunction) : null; FunctionScoreQuery diffFunction = new FunctionScoreQuery(q.getSubQuery(), otherFunciton, minScore, combineFunction, maxBoost); FunctionScoreQuery diffMaxBoost = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, minScore, combineFunction, maxBoost == 1.0f ? 
0.9f : 1.0f); - q1.setBoost(3.0f); FunctionScoreQuery[] queries = new FunctionScoreQuery[] { diffFunction, diffMinScore, diffQuery, q, - q1, diffMaxBoost }; final int numIters = randomIntBetween(20, 100); @@ -678,7 +676,6 @@ public class FunctionScoreTests extends ESTestCase { FiltersFunctionScoreQuery diffMinScore = new FiltersFunctionScoreQuery(new TermQuery(new Term("foo", "bar")), mode, new FilterFunction[] {function}, maxBoost, minScore == null ? 0.9f : null, combineFunction); FilterFunction otherFunc = new FilterFunction(new TermQuery(new Term("filter", "other_query")), scoreFunction); FiltersFunctionScoreQuery diffFunc = new FiltersFunctionScoreQuery(new TermQuery(new Term("foo", "bar")), mode, randomBoolean() ? new FilterFunction[] {function, otherFunc} : new FilterFunction[] {otherFunc}, maxBoost, minScore, combineFunction); - q1.setBoost(3.0f); FiltersFunctionScoreQuery[] queries = new FiltersFunctionScoreQuery[] { diffQuery, @@ -687,7 +684,6 @@ public class FunctionScoreTests extends ESTestCase { diffMode, diffFunc, q, - q1, diffCombineFunc }; final int numIters = randomIntBetween(20, 100); diff --git a/core/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java b/core/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java index 1758d95a554..ec405bd8407 100644 --- a/core/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java +++ b/core/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java @@ -68,7 +68,7 @@ public class CustomQueryParserIT extends ESIntegTestCase { private static QueryShardContext queryShardContext() { IndicesService indicesService = internalCluster().getDataNodeInstance(IndicesService.class); - return indicesService.indexServiceSafe("index").newQueryShardContext(); + return indicesService.indexServiceSafe(resolveIndex("index")).newQueryShardContext(); } //see #11120 diff --git a/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java index 44b91679623..d3c9975cf58 100644 --- a/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java @@ -19,8 +19,8 @@ package org.elasticsearch.index.search.geo; -import com.spatial4j.core.context.SpatialContext; -import com.spatial4j.core.distance.DistanceUtils; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.distance.DistanceUtils; import org.apache.lucene.spatial.prefix.tree.Cell; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java b/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java index 4ef84d118fd..5b5b24bbe4b 100644 --- a/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java @@ -218,7 +218,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD writer.addDocument(document); MultiValueMode sortMode = MultiValueMode.SUM; - DirectoryReader directoryReader = DirectoryReader.open(writer, false); + DirectoryReader directoryReader = DirectoryReader.open(writer); directoryReader = 
ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexService.index(), 0)); IndexSearcher searcher = new IndexSearcher(directoryReader); Query parentFilter = new TermQuery(new Term("__type", "parent")); diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index ff82b7c43ac..d5e9ff85ba3 100644 --- a/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -87,13 +87,13 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { docs.add(parent); writer.addDocuments(docs); if (rarely()) { // we need to have a bit more segments than what RandomIndexWriter would do by default - DirectoryReader.open(writer, false).close(); + DirectoryReader.open(writer).close(); } } writer.commit(); MultiValueMode sortMode = randomFrom(Arrays.asList(MultiValueMode.MIN, MultiValueMode.MAX)); - DirectoryReader reader = DirectoryReader.open(writer, false); + DirectoryReader reader = DirectoryReader.open(writer); reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(indexService.index(), 0)); IndexSearcher searcher = new IndexSearcher(reader); PagedBytesIndexFieldData indexFieldData1 = getForField("f"); @@ -278,7 +278,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { writer.addDocument(document); MultiValueMode sortMode = MultiValueMode.MIN; - DirectoryReader reader = DirectoryReader.open(writer, false); + DirectoryReader reader = DirectoryReader.open(writer); reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(indexService.index(), 0)); IndexSearcher searcher = new IndexSearcher(reader); PagedBytesIndexFieldData indexFieldData = getForField("field2"); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java index cf95f22ae3b..cfadab6efb8 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java @@ -56,7 +56,7 @@ public class IndexSearcherWrapperTests extends ESTestCase { doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); doc.add(new TextField("field", "doc", random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); writer.addDocument(doc); - DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "_na_", 1)); + DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); IndexSearcher searcher = new IndexSearcher(open); assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits); final AtomicInteger closeCalls = new AtomicInteger(0); @@ -106,7 +106,7 @@ public class IndexSearcherWrapperTests extends ESTestCase { doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); doc.add(new TextField("field", "doc", random().nextBoolean() ? 
Field.Store.YES : Field.Store.NO)); writer.addDocument(doc); - DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "_na_", 1)); + DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); IndexSearcher searcher = new IndexSearcher(open); assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits); searcher.setSimilarity(iwc.getSimilarity()); @@ -148,7 +148,7 @@ public class IndexSearcherWrapperTests extends ESTestCase { doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); doc.add(new TextField("field", "doc", random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); writer.addDocument(doc); - DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "_na_", 1)); + DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); IndexSearcher searcher = new IndexSearcher(open); assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits); searcher.setSimilarity(iwc.getSimilarity()); @@ -168,7 +168,7 @@ public class IndexSearcherWrapperTests extends ESTestCase { doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); doc.add(new TextField("field", "doc", random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); writer.addDocument(doc); - DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "_na_", 1)); + DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); IndexSearcher searcher = new IndexSearcher(open); assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits); searcher.setSimilarity(iwc.getSimilarity()); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index e70ca9ec6de..cf4a0e2e0aa 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -40,7 +40,6 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterInfoService; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -50,9 +49,9 @@ import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.RestoreSource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingHelper; -import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -70,6 +69,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.env.Environment; import 
org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.NodeServicesProvider; @@ -84,6 +84,7 @@ import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.snapshots.IndexShardRepository; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; @@ -141,25 +142,25 @@ public class IndexShardTests extends ESSingleNodeTestCase { public void testWriteShardState() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { - ShardId id = new ShardId("foo", "_na_", 1); + ShardId id = new ShardId("foo", "fooUUID", 1); long version = between(1, Integer.MAX_VALUE / 2); boolean primary = randomBoolean(); AllocationId allocationId = randomBoolean() ? null : randomAllocationId(); - ShardStateMetaData state1 = new ShardStateMetaData(version, primary, "foo", allocationId); + ShardStateMetaData state1 = new ShardStateMetaData(version, primary, "fooUUID", allocationId); write(state1, env.availableShardPaths(id)); ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(id)); assertEquals(shardStateMetaData, state1); - ShardStateMetaData state2 = new ShardStateMetaData(version, primary, "foo", allocationId); + ShardStateMetaData state2 = new ShardStateMetaData(version, primary, "fooUUID", allocationId); write(state2, env.availableShardPaths(id)); shardStateMetaData = load(logger, env.availableShardPaths(id)); assertEquals(shardStateMetaData, state1); - ShardStateMetaData state3 = new ShardStateMetaData(version + 1, primary, "foo", allocationId); + ShardStateMetaData state3 = new ShardStateMetaData(version + 1, primary, "fooUUID", allocationId); write(state3, env.availableShardPaths(id)); shardStateMetaData = load(logger, env.availableShardPaths(id)); assertEquals(shardStateMetaData, state3); - assertEquals("foo", state3.indexUUID); + assertEquals("fooUUID", state3.indexUUID); } } @@ -167,7 +168,9 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); - Path[] shardPaths = env.availableShardPaths(new ShardId("test", "_na_", 0)); + ClusterService cs = getInstanceFromNode(ClusterService.class); + final Index index = cs.state().metaData().index("test").getIndex(); + Path[] shardPaths = env.availableShardPaths(new ShardId(index, 0)); logger.info("--> paths: [{}]", (Object)shardPaths); // Should not be able to acquire the lock because it's already open try { @@ -179,7 +182,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { // Test without the regular shard lock to assume we can acquire it // (worst case, meaning that the shard lock could be acquired and // we're green to delete the shard's directory) - ShardLock sLock = new DummyShardLock(new ShardId("test", "_na_", 0)); + ShardLock sLock = new DummyShardLock(new ShardId(index, 0)); try { env.deleteShardDirectoryUnderLock(sLock, IndexSettingsModule.newIndexSettings("test", Settings.EMPTY)); fail("should not have been able to delete the directory"); @@ -193,7 +196,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { 
ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals(getShardStateMetadata(shard), shardStateMetaData); @@ -226,7 +229,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); // fail shard shard.failShard("test shard fail", new CorruptIndexException("", "")); @@ -281,7 +284,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); ensureGreen("test"); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe("test"); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test")); IndexShard indexShard = indexService.getShardOrNull(0); client().admin().indices().prepareDelete("test").get(); assertThat(indexShard.getActiveOperationsCount(), equalTo(0)); @@ -303,7 +306,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); ensureGreen("test"); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe("test"); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test")); IndexShard indexShard = indexService.getShardOrNull(0); assertEquals(0, indexShard.getActiveOperationsCount()); Releasable operation1 = indexShard.acquirePrimaryOperationLock(); @@ -320,11 +323,11 @@ public class IndexShardTests extends ESSingleNodeTestCase { client().prepareIndex("test", "test").setSource("{}").get(); ensureGreen("test"); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - indicesService.indexService("test").getShardOrNull(0).checkIdle(0); + indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0); assertBusy(() -> { IndexStats indexStats = client().admin().indices().prepareStats("test").clear().get().getIndex("test"); assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - indicesService.indexService("test").getShardOrNull(0).checkIdle(0); + indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0); } ); IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); @@ -345,7 +348,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ensureGreen(); client().prepareIndex("test", "bar", "1").setSource("{}").get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = 
indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); setDurability(shard, Translog.Durability.REQUEST); assertFalse(shard.getEngine().getTranslog().syncNeeded()); @@ -385,7 +388,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { client().prepareIndex("test", "test").setSource("{}").get(); ensureGreen("test"); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexShard test = indicesService.indexService("test").getShardOrNull(0); + IndexShard test = indicesService.indexService(resolveIndex("test")).getShardOrNull(0); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); client().prepareIndex("test", "test").setSource("{}").get(); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); @@ -396,7 +399,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { public void testUpdatePriority() { assertAcked(client().admin().indices().prepareCreate("test") .setSettings(IndexMetaData.SETTING_PRIORITY, 200)); - IndexService indexService = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400).build()).get(); assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); @@ -410,7 +413,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { SearchResponse response = client().prepareSearch("test").get(); assertHitCount(response, 1L); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); ShardPath shardPath = shard.shardPath(); Path dataPath = shardPath.getDataPath(); @@ -530,9 +533,9 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); - ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), shard, new CommonStatsFlags()), shard.commitStats()); + ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), test.cache().getPercolatorQueryCache(), shard, new CommonStatsFlags()), shard.commitStats()); assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath()); assertEquals(shard.shardPath().getRootStatePath().toString(), stats.getStatePath()); assertEquals(shard.shardPath().isCustomDataPath(), stats.isCustomDataPath()); @@ -570,7 +573,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ensureGreen(); client().prepareIndex("test_iol", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test_iol"); + IndexService test = 
indicesService.indexService(resolveIndex("test_iol")); IndexShard shard = test.getShardOrNull(0); AtomicInteger preIndex = new AtomicInteger(); AtomicInteger postIndex = new AtomicInteger(); @@ -669,7 +672,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test", settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST).build()); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); assertFalse(shard.shouldFlush()); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); @@ -703,7 +706,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); assertFalse(shard.shouldFlush()); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133/* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); @@ -749,7 +752,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ).get()); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); CountDownLatch latch = new CountDownLatch(1); Thread recoveryThread = new Thread(() -> { @@ -779,7 +782,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ).get()); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); final int numThreads = randomIntBetween(2, 4); Thread[] indexThreads = new Thread[numThreads]; @@ -830,7 +833,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); int translogOps = 1; client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); @@ -861,7 +864,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); if (randomBoolean()) { @@ -892,7 +895,7 @@ public class 
IndexShardTests extends ESSingleNodeTestCase { ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); @@ -945,7 +948,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); ShardRouting origRouting = shard.routingEntry(); assertThat(shard.state(), equalTo(IndexShardState.STARTED)); @@ -967,8 +970,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test_target"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); - IndexService test_target = indicesService.indexService("test_target"); + IndexService test = indicesService.indexService(resolveIndex("test")); + IndexService test_target = indicesService.indexService(resolveIndex("test_target")); final IndexShard test_shard = test.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); @@ -1029,7 +1032,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService("test"); + IndexService indexService = indicesService.indexService(resolveIndex("test")); IndexShard shard = indexService.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefresh(true).get(); @@ -1078,7 +1081,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService("test"); + IndexService indexService = indicesService.indexService(resolveIndex("test")); IndexShard shard = indexService.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefresh(true).get(); @@ -1126,7 +1129,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService("test"); + IndexService indexService = indicesService.indexService(resolveIndex("test")); IndexShard shard = indexService.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { @@ -1156,7 +1159,11 @@ public class IndexShardTests extends ESSingleNodeTestCase { ShardRouting routing = new ShardRouting(shard.routingEntry()); shard.close("simon says", true); 
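// Build a replacement IndexShard over the same store, this time with the searcher wrapper installed.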
NodeServicesProvider indexServices = indexService.getIndexServices(); - IndexShard newShard = new IndexShard(shard.shardId(), indexService.getIndexSettings(), shard.shardPath(), shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, indexServices, indexService.getSearchSlowLog(), null, listeners); + IndexShard newShard = new IndexShard(shard.shardId(), indexService.getIndexSettings(), shard.shardPath(), + shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), + indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, + indexServices, indexService.getSearchSlowLog(), null, listeners + ); ShardRoutingHelper.reinit(routing); newShard.updateRoutingEntry(routing, false); DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT); @@ -1179,7 +1186,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { .endObject().endObject().endObject()).get(); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("testindexfortranslogsync"); + IndexService test = indicesService.indexService(resolveIndex("testindexfortranslogsync")); IndexShard shard = test.getShardOrNull(0); ShardRouting routing = new ShardRouting(shard.routingEntry()); test.removeShard(0, "b/c britta says so"); @@ -1206,7 +1213,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { .endObject().endObject().endObject()).get(); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("index"); + IndexService test = indicesService.indexService(resolveIndex("index")); IndexShard shard = test.getShardOrNull(0); ShardRouting routing = new ShardRouting(shard.routingEntry()); test.removeShard(0, "b/c britta says so"); @@ -1235,7 +1242,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { .endObject().endObject().endObject()).get(); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("index"); + IndexService test = indicesService.indexService(resolveIndex("index")); IndexShard shard = test.getShardOrNull(0); ShardRouting routing = new ShardRouting(shard.routingEntry()); test.removeShard(0, "b/c britta says so"); diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java index 2a52e8c557c..537ce83d3d2 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -42,13 +43,13 @@ public class ShardPathTests extends ESTestCase { Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, "0xDEADBEEF") .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); Settings settings = builder.build(); - ShardId shardId = new ShardId("foo", "_na_", 0); + 
ShardId shardId = new ShardId("foo", "0xDEADBEEF", 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); ShardStateMetaData.FORMAT.write(new ShardStateMetaData(2, true, "0xDEADBEEF", AllocationId.newInitializing()), 2, path); ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)); assertEquals(path, shardPath.getDataPath()); - assertEquals("0xDEADBEEF", shardPath.getIndexUUID()); + assertEquals("0xDEADBEEF", shardPath.getShardId().getIndex().getUUID()); assertEquals("foo", shardPath.getShardId().getIndexName()); assertEquals(path.resolve("translog"), shardPath.resolveTranslog()); assertEquals(path.resolve("index"), shardPath.resolveIndex()); @@ -57,14 +58,15 @@ public class ShardPathTests extends ESTestCase { public void testFailLoadShardPathOnMultiState() throws IOException { try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) { - Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, "0xDEADBEEF") + final String indexUUID = "0xDEADBEEF"; + Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, indexUUID) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); Settings settings = builder.build(); - ShardId shardId = new ShardId("foo", "_na_", 0); + ShardId shardId = new ShardId("foo", indexUUID, 0); Path[] paths = env.availableShardPaths(shardId); assumeTrue("This test tests multi data.path but we only got one", paths.length > 1); int id = randomIntBetween(1, 10); - ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, "0xDEADBEEF", AllocationId.newInitializing()), id, paths); + ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, indexUUID, AllocationId.newInitializing()), id, paths); ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)); fail("Expected IllegalStateException"); } catch (IllegalStateException e) { @@ -77,7 +79,7 @@ public class ShardPathTests extends ESTestCase { Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, "foobar") .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); Settings settings = builder.build(); - ShardId shardId = new ShardId("foo", "_na_", 0); + ShardId shardId = new ShardId("foo", "foobar", 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); int id = randomIntBetween(1, 10); @@ -90,9 +92,10 @@ public class ShardPathTests extends ESTestCase { } public void testIllegalCustomDataPath() { - final Path path = createTempDir().resolve("foo").resolve("0"); + Index index = new Index("foo", "foo"); + final Path path = createTempDir().resolve(index.getUUID()).resolve("0"); try { - new ShardPath(true, path, path, "foo", new ShardId("foo", "_na_", 0)); + new ShardPath(true, path, path, new ShardId(index, 0)); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), is("shard state path must be different to the data path when using custom data paths")); @@ -100,8 +103,9 @@ public class ShardPathTests extends ESTestCase { } public void testValidCtor() { - final Path path = createTempDir().resolve("foo").resolve("0"); - ShardPath shardPath = new ShardPath(false, path, path, "foo", new ShardId("foo", "_na_", 0)); + Index index = new Index("foo", "foo"); + final Path path = createTempDir().resolve(index.getUUID()).resolve("0"); + ShardPath shardPath = new 
ShardPath(false, path, path, new ShardId(index, 0)); assertFalse(shardPath.isCustomDataPath()); assertEquals(shardPath.getDataPath(), path); assertEquals(shardPath.getShardStatePath(), path); @@ -111,8 +115,9 @@ public class ShardPathTests extends ESTestCase { boolean useCustomDataPath = randomBoolean(); final Settings indexSettings; final Settings nodeSettings; + final String indexUUID = "0xDEADBEEF"; Settings.Builder indexSettingsBuilder = settingsBuilder() - .put(IndexMetaData.SETTING_INDEX_UUID, "0xDEADBEEF") + .put(IndexMetaData.SETTING_INDEX_UUID, indexUUID) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); final Path customPath; if (useCustomDataPath) { @@ -132,10 +137,10 @@ public class ShardPathTests extends ESTestCase { nodeSettings = Settings.EMPTY; } try (final NodeEnvironment env = newNodeEnvironment(nodeSettings)) { - ShardId shardId = new ShardId("foo", "_na_", 0); + ShardId shardId = new ShardId("foo", indexUUID, 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); - ShardStateMetaData.FORMAT.write(new ShardStateMetaData(2, true, "0xDEADBEEF", AllocationId.newInitializing()), 2, path); + ShardStateMetaData.FORMAT.write(new ShardStateMetaData(2, true, indexUUID, AllocationId.newInitializing()), 2, path); ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), indexSettings)); boolean found = false; for (Path p : env.nodeDataPaths()) { diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java index 105179a1f53..e960622d1c1 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java @@ -39,7 +39,7 @@ public class ShardUtilsTests extends ESTestCase { IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.commit(); ShardId id = new ShardId("foo", "_na_", random().nextInt()); - try (DirectoryReader reader = DirectoryReader.open(writer, random().nextBoolean())) { + try (DirectoryReader reader = DirectoryReader.open(writer)) { ElasticsearchDirectoryReader wrap = ElasticsearchDirectoryReader.wrap(reader, id); assertEquals(id, ShardUtils.extractShardId(wrap)); } @@ -53,7 +53,7 @@ public class ShardUtilsTests extends ESTestCase { } } - try (DirectoryReader reader = DirectoryReader.open(writer, random().nextBoolean())) { + try (DirectoryReader reader = DirectoryReader.open(writer)) { ElasticsearchDirectoryReader wrap = ElasticsearchDirectoryReader.wrap(reader, id); assertEquals(id, ShardUtils.extractShardId(wrap)); CompositeReaderContext context = wrap.getContext(); diff --git a/core/src/test/java/org/elasticsearch/index/shard/VersionFieldUpgraderTests.java b/core/src/test/java/org/elasticsearch/index/shard/VersionFieldUpgraderTests.java deleted file mode 100644 index 2fc02fb0503..00000000000 --- a/core/src/test/java/org/elasticsearch/index/shard/VersionFieldUpgraderTests.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.shard; - -import org.apache.lucene.analysis.CannedTokenStream; -import org.apache.lucene.analysis.Token; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.NumericDocValuesField; -import org.apache.lucene.document.TextField; -import org.apache.lucene.index.CodecReader; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.DocValuesType; -import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.store.Directory; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.TestUtil; -import org.elasticsearch.common.Numbers; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.index.mapper.internal.VersionFieldMapper; -import org.elasticsearch.test.ESTestCase; - -/** Tests upgrading old document versions from _uid payloads to _version docvalues */ -public class VersionFieldUpgraderTests extends ESTestCase { - - /** Simple test: one doc in the old format, check that it looks correct */ - public void testUpgradeOneDocument() throws Exception { - Directory dir = newDirectory(); - IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null)); - - // add a document with a _uid having a payload of 3 - Document doc = new Document(); - Token token = new Token("1", 0, 1); - token.setPayload(new BytesRef(Numbers.longToBytes(3))); - doc.add(new TextField(UidFieldMapper.NAME, new CannedTokenStream(token))); - iw.addDocument(doc); - iw.commit(); - - CodecReader reader = getOnlySegmentReader(DirectoryReader.open(iw, true)); - CodecReader upgraded = VersionFieldUpgrader.wrap(reader); - // we need to be upgraded, should be a different instance - assertNotSame(reader, upgraded); - - // make sure we can see our numericdocvalues in fieldinfos - FieldInfo versionField = upgraded.getFieldInfos().fieldInfo(VersionFieldMapper.NAME); - assertNotNull(versionField); - assertEquals(DocValuesType.NUMERIC, versionField.getDocValuesType()); - // should have a value of 3, and be visible in docsWithField - assertEquals(3, upgraded.getNumericDocValues(VersionFieldMapper.NAME).get(0)); - assertTrue(upgraded.getDocsWithField(VersionFieldMapper.NAME).get(0)); - - // verify filterreader with checkindex - TestUtil.checkReader(upgraded); - - reader.close(); - iw.close(); - dir.close(); - } - - /** test that we are a non-op if the segment already has the version field */ - public void testAlreadyUpgraded() throws Exception { - Directory dir = newDirectory(); - IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null)); - - // add a document with a _uid having a payload of 3 - Document doc = new Document(); - Token token = new Token("1", 0, 1); - token.setPayload(new BytesRef(Numbers.longToBytes(3))); - doc.add(new TextField(UidFieldMapper.NAME, new CannedTokenStream(token))); - doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 3)); - iw.addDocument(doc); - iw.commit(); - - CodecReader reader = getOnlySegmentReader(DirectoryReader.open(iw, 
true)); - CodecReader upgraded = VersionFieldUpgrader.wrap(reader); - // we already upgraded: should be same instance - assertSame(reader, upgraded); - - reader.close(); - iw.close(); - dir.close(); - } - - /** Test upgrading two documents */ - public void testUpgradeTwoDocuments() throws Exception { - Directory dir = newDirectory(); - IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null)); - - // add a document with a _uid having a payload of 3 - Document doc = new Document(); - Token token = new Token("1", 0, 1); - token.setPayload(new BytesRef(Numbers.longToBytes(3))); - doc.add(new TextField(UidFieldMapper.NAME, new CannedTokenStream(token))); - iw.addDocument(doc); - - doc = new Document(); - token = new Token("2", 0, 1); - token.setPayload(new BytesRef(Numbers.longToBytes(4))); - doc.add(new TextField(UidFieldMapper.NAME, new CannedTokenStream(token))); - iw.addDocument(doc); - - iw.commit(); - - CodecReader reader = getOnlySegmentReader(DirectoryReader.open(iw, true)); - CodecReader upgraded = VersionFieldUpgrader.wrap(reader); - // we need to be upgraded, should be a different instance - assertNotSame(reader, upgraded); - - // make sure we can see our numericdocvalues in fieldinfos - FieldInfo versionField = upgraded.getFieldInfos().fieldInfo(VersionFieldMapper.NAME); - assertNotNull(versionField); - assertEquals(DocValuesType.NUMERIC, versionField.getDocValuesType()); - // should have a values of 3 and 4, and be visible in docsWithField - assertEquals(3, upgraded.getNumericDocValues(VersionFieldMapper.NAME).get(0)); - assertEquals(4, upgraded.getNumericDocValues(VersionFieldMapper.NAME).get(1)); - assertTrue(upgraded.getDocsWithField(VersionFieldMapper.NAME).get(0)); - assertTrue(upgraded.getDocsWithField(VersionFieldMapper.NAME).get(1)); - - // verify filterreader with checkindex - TestUtil.checkReader(upgraded); - - reader.close(); - iw.close(); - dir.close(); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java new file mode 100644 index 00000000000..edb337fd4e6 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.index.similarity; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; + +import java.util.Collections; + +public class SimilarityServiceTests extends ESTestCase { + + // Tests #16594 + public void testOverrideBuiltInSimilarity() { + Settings settings = Settings.builder().put("index.similarity.BM25.type", "classic").build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); + try { + new SimilarityService(indexSettings, Collections.emptyMap()); + fail("can't override bm25"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "Cannot redefine built-in Similarity [BM25]"); + } + } + + // Pre v3 indices could override built-in similarities + public void testOverrideBuiltInSimilarityPreV3() { + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put("index.similarity.BM25.type", "classic") + .build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); + SimilarityService service = new SimilarityService(indexSettings, Collections.emptyMap()); + assertTrue(service.getSimilarity("BM25") instanceof ClassicSimilarityProvider); + } + + // Tests #16594 + public void testDefaultSimilarity() { + Settings settings = Settings.builder().put("index.similarity.default.type", "BM25").build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); + SimilarityService service = new SimilarityService(indexSettings, Collections.emptyMap()); + assertTrue(service.getDefaultSimilarity() instanceof BM25SimilarityProvider); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 4031aa5da25..aaa1671f84b 100644 --- a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -49,6 +49,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.gateway.PrimaryShardAllocator; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.shard.IndexEventListener; @@ -571,8 +572,9 @@ public class CorruptedFileIT extends ESIntegTestCase { private Map<String, List<Path>> findFilesToCorruptForReplica() throws IOException { Map<String, List<Path>> filesToNodes = new HashMap<>(); ClusterState state = client().admin().cluster().prepareState().get().getState(); + Index test = state.metaData().index("test").getIndex(); for (ShardRouting shardRouting : state.getRoutingTable().allShards("test")) { - if (shardRouting.primary() == true) { + if (shardRouting.primary()) { continue; } assertTrue(shardRouting.assignedToNode()); @@ -582,8 +584,7 @@ public class CorruptedFileIT extends ESIntegTestCase { filesToNodes.put(nodeStats.getNode().getName(), files); for (FsInfo.Path info : nodeStats.getFs()) { String path = info.getPath(); - final String relativeDataLocationPath = "indices/test/" + Integer.toString(shardRouting.getId()) + "/index"; - Path file = PathUtils.get(path).resolve(relativeDataLocationPath);
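+ // Shard data directories are keyed by index UUID, i.e. indices/<index-uuid>/<shard-id>/index on each data path.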
Path file = PathUtils.get(path).resolve("indices").resolve(test.getUUID()).resolve(Integer.toString(shardRouting.getId())).resolve("index"); if (Files.exists(file)) { // multi data path might only have one path in use try (DirectoryStream stream = Files.newDirectoryStream(file)) { for (Path item : stream) { @@ -604,6 +605,7 @@ public class CorruptedFileIT extends ESIntegTestCase { private ShardRouting corruptRandomPrimaryFile(final boolean includePerCommitFiles) throws IOException { ClusterState state = client().admin().cluster().prepareState().get().getState(); + Index test = state.metaData().index("test").getIndex(); GroupShardsIterator shardIterators = state.getRoutingNodes().getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false); List iterators = iterableAsArrayList(shardIterators); ShardIterator shardIterator = RandomPicks.randomFrom(getRandom(), iterators); @@ -616,8 +618,7 @@ public class CorruptedFileIT extends ESIntegTestCase { Set files = new TreeSet<>(); // treeset makes sure iteration order is deterministic for (FsInfo.Path info : nodeStatses.getNodes()[0].getFs()) { String path = info.getPath(); - final String relativeDataLocationPath = "indices/test/" + Integer.toString(shardRouting.getId()) + "/index"; - Path file = PathUtils.get(path).resolve(relativeDataLocationPath); + Path file = PathUtils.get(path).resolve("indices").resolve(test.getUUID()).resolve(Integer.toString(shardRouting.getId())).resolve("index"); if (Files.exists(file)) { // multi data path might only have one path in use try (DirectoryStream stream = Files.newDirectoryStream(file)) { for (Path item : stream) { @@ -676,12 +677,13 @@ public class CorruptedFileIT extends ESIntegTestCase { public List listShardFiles(ShardRouting routing) throws IOException { NodesStatsResponse nodeStatses = client().admin().cluster().prepareNodesStats(routing.currentNodeId()).setFs(true).get(); - + ClusterState state = client().admin().cluster().prepareState().get().getState(); + final Index test = state.metaData().index("test").getIndex(); assertThat(routing.toString(), nodeStatses.getNodes().length, equalTo(1)); List files = new ArrayList<>(); for (FsInfo.Path info : nodeStatses.getNodes()[0].getFs()) { String path = info.getPath(); - Path file = PathUtils.get(path).resolve("indices/test/" + Integer.toString(routing.getId()) + "/index"); + Path file = PathUtils.get(path).resolve("indices/" + test.getUUID() + "/" + Integer.toString(routing.getId()) + "/index"); if (Files.exists(file)) { // multi data path might only have one path in use try (DirectoryStream stream = Files.newDirectoryStream(file)) { for (Path item : stream) { diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java index ae158b87c59..0a8cd9a6fe0 100644 --- a/core/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.monitor.fs.FsInfo; @@ -110,6 +111,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase { private void corruptRandomTranslogFiles() throws 
IOException { ClusterState state = client().admin().cluster().prepareState().get().getState(); GroupShardsIterator shardIterators = state.getRoutingNodes().getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false); + final Index test = state.metaData().index("test").getIndex(); List iterators = iterableAsArrayList(shardIterators); ShardIterator shardIterator = RandomPicks.randomFrom(getRandom(), iterators); ShardRouting shardRouting = shardIterator.nextOrNull(); @@ -121,7 +123,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase { Set files = new TreeSet<>(); // treeset makes sure iteration order is deterministic for (FsInfo.Path fsPath : nodeStatses.getNodes()[0].getFs()) { String path = fsPath.getPath(); - final String relativeDataLocationPath = "indices/test/" + Integer.toString(shardRouting.getId()) + "/translog"; + final String relativeDataLocationPath = "indices/" + test.getUUID() + "/" + Integer.toString(shardRouting.getId()) + "/translog"; Path file = PathUtils.get(path).resolve(relativeDataLocationPath); if (Files.exists(file)) { logger.info("--> path: {}", file); diff --git a/core/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java b/core/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java index 9da39b8da71..f7d793f03ed 100644 --- a/core/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java @@ -46,9 +46,9 @@ public class FsDirectoryServiceTests extends ESTestCase { IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build); IndexStoreConfig config = new IndexStoreConfig(build); IndexStore store = new IndexStore(settings, config); - Path tempDir = createTempDir().resolve("foo").resolve("0"); + Path tempDir = createTempDir().resolve(settings.getUUID()).resolve("0"); Files.createDirectories(tempDir); - ShardPath path = new ShardPath(false, tempDir, tempDir, settings.getUUID(), new ShardId(settings.getIndex(), 0)); + ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(settings.getIndex(), 0)); FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, store, path); Directory directory = fsDirectoryService.newDirectory(); assertTrue(directory instanceof RateLimitedFSDirectory); @@ -62,9 +62,9 @@ public class FsDirectoryServiceTests extends ESTestCase { IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build); IndexStoreConfig config = new IndexStoreConfig(build); IndexStore store = new IndexStore(settings, config); - Path tempDir = createTempDir().resolve("foo").resolve("0"); + Path tempDir = createTempDir().resolve(settings.getUUID()).resolve("0"); Files.createDirectories(tempDir); - ShardPath path = new ShardPath(false, tempDir, tempDir, settings.getUUID(), new ShardId(settings.getIndex(), 0)); + ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(settings.getIndex(), 0)); FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, store, path); Directory directory = fsDirectoryService.newDirectory(); assertTrue(directory instanceof RateLimitedFSDirectory); diff --git a/core/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java b/core/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java index d9000e23a61..da5c1f3ecfa 100644 --- a/core/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java +++ b/core/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java @@ -47,13 +47,14 @@ import 
java.util.Locale; public class IndexStoreTests extends ESTestCase { public void testStoreDirectory() throws IOException { - final Path tempDir = createTempDir().resolve("foo").resolve("0"); + Index index = new Index("foo", "fooUUID"); + final Path tempDir = createTempDir().resolve(index.getUUID()).resolve("0"); final IndexModule.Type[] values = IndexModule.Type.values(); final IndexModule.Type type = RandomPicks.randomFrom(random(), values); Settings settings = Settings.settingsBuilder().put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), type.name().toLowerCase(Locale.ROOT)) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings); - FsDirectoryService service = new FsDirectoryService(indexSettings, null, new ShardPath(false, tempDir, tempDir, "foo", new ShardId("foo", "_na_", 0))); + FsDirectoryService service = new FsDirectoryService(indexSettings, null, new ShardPath(false, tempDir, tempDir, new ShardId(index, 0))); try (final Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) { switch (type) { case NIOFS: @@ -84,8 +85,9 @@ public class IndexStoreTests extends ESTestCase { } public void testStoreDirectoryDefault() throws IOException { - final Path tempDir = createTempDir().resolve("foo").resolve("0"); - FsDirectoryService service = new FsDirectoryService(IndexSettingsModule.newIndexSettings("foo", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()), null, new ShardPath(false, tempDir, tempDir, "foo", new ShardId("foo", "_na_", 0))); + Index index = new Index("bar", "foo"); + final Path tempDir = createTempDir().resolve(index.getUUID()).resolve("0"); + FsDirectoryService service = new FsDirectoryService(IndexSettingsModule.newIndexSettings("bar", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()), null, new ShardPath(false, tempDir, tempDir, new ShardId(index, 0))); try (final Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) { if (Constants.WINDOWS) { assertTrue(directory.toString(), directory instanceof MMapDirectory || directory instanceof SimpleFSDirectory); diff --git a/core/src/test/java/org/elasticsearch/index/store/StoreTests.java b/core/src/test/java/org/elasticsearch/index/store/StoreTests.java index 36fc5cf0717..eff41dd3ffe 100644 --- a/core/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/core/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -294,67 +294,6 @@ public class StoreTests extends ESTestCase { IOUtils.close(verifyingOutput, dir); } - // TODO: remove this, its too fragile. just use a static old index instead. - private static final class OldSIMockingCodec extends FilterCodec { - - protected OldSIMockingCodec() { - super(new Lucene54Codec().getName(), new Lucene54Codec()); - } - - @Override - public SegmentInfoFormat segmentInfoFormat() { - final SegmentInfoFormat segmentInfoFormat = super.segmentInfoFormat(); - return new SegmentInfoFormat() { - @Override - public SegmentInfo read(Directory directory, String segmentName, byte[] segmentID, IOContext context) throws IOException { - return segmentInfoFormat.read(directory, segmentName, segmentID, context); - } - - // this sucks it's a full copy of Lucene50SegmentInfoFormat but hey I couldn't find a way to make it write 4_5_0 versions - // somebody was too paranoid when implementing this. ey rmuir, was that you? 
- go fix it :P - @Override - public void write(Directory dir, SegmentInfo si, IOContext ioContext) throws IOException { - final String fileName = IndexFileNames.segmentFileName(si.name, "", Lucene50SegmentInfoFormat.SI_EXTENSION); - si.addFile(fileName); - - boolean success = false; - try (IndexOutput output = dir.createOutput(fileName, ioContext)) { - CodecUtil.writeIndexHeader(output, - "Lucene50SegmentInfo", - 0, - si.getId(), - ""); - Version version = Version.LUCENE_4_5_0; // FOOOOOO!! - // Write the Lucene version that created this segment, since 3.1 - output.writeInt(version.major); - output.writeInt(version.minor); - output.writeInt(version.bugfix); - assert version.prerelease == 0; - output.writeInt(si.maxDoc()); - - output.writeByte((byte) (si.getUseCompoundFile() ? SegmentInfo.YES : SegmentInfo.NO)); - output.writeStringStringMap(si.getDiagnostics()); - Set files = si.files(); - for (String file : files) { - if (!IndexFileNames.parseSegmentName(file).equals(si.name)) { - throw new IllegalArgumentException("invalid files: expected segment=" + si.name + ", got=" + files); - } - } - output.writeStringSet(files); - output.writeStringStringMap(si.getAttributes()); - CodecUtil.writeFooter(output); - success = true; - } finally { - if (!success) { - // TODO: are we doing this outside of the tracking wrapper? why must SIWriter cleanup like this? - IOUtils.deleteFilesIgnoringExceptions(si.dir, fileName); - } - } - } - }; - } - } - public void testNewChecksums() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); DirectoryService directoryService = new LuceneManagedDirectoryService(random()); @@ -381,7 +320,7 @@ public class StoreTests extends ESTestCase { } } if (random().nextBoolean()) { - DirectoryReader.open(writer, random().nextBoolean()).close(); // flush + DirectoryReader.open(writer).close(); // flush } Store.MetadataSnapshot metadata; // check before we committed @@ -472,32 +411,12 @@ public class StoreTests extends ESTestCase { } - final Adler32 adler32 = new Adler32(); final long luceneChecksum; try (IndexInput indexInput = dir.openInput("lucene_checksum.bin", IOContext.DEFAULT)) { assertEquals(luceneFileLength, indexInput.length()); luceneChecksum = CodecUtil.retrieveChecksum(indexInput); } - { // positive check - StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0); - assertTrue(Store.checkIntegrityNoException(lucene, dir)); - } - - { // negative check - wrong checksum - StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength, Store.digestToString(luceneChecksum + 1), Version.LUCENE_4_8_0); - assertFalse(Store.checkIntegrityNoException(lucene, dir)); - } - - { // negative check - wrong length - StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength + 1, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0); - assertFalse(Store.checkIntegrityNoException(lucene, dir)); - } - - { // negative check - wrong file - StoreFileMetaData lucene = new StoreFileMetaData("legacy.bin", luceneFileLength, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0); - assertFalse(Store.checkIntegrityNoException(lucene, dir)); - } dir.close(); } @@ -600,8 +519,6 @@ public class StoreTests extends ESTestCase { dir = StoreTests.newDirectory(random); if (dir instanceof MockDirectoryWrapper) { ((MockDirectoryWrapper) dir).setPreventDoubleWrite(preventDoubleWrite); - // TODO: fix this test to handle virus 
checker - ((MockDirectoryWrapper) dir).setEnableVirusScanner(false); } this.random = random; } @@ -859,28 +776,6 @@ public class StoreTests extends ESTestCase { IOUtils.close(store); } - public void testCleanUpWithLegacyChecksums() throws IOException { - Map metaDataMap = new HashMap<>(); - metaDataMap.put("segments_1", new StoreFileMetaData("segments_1", 50, "foobar", Version.LUCENE_4_8_0, new BytesRef(new byte[]{1}))); - metaDataMap.put("_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", Version.LUCENE_4_8_0, new BytesRef())); - Store.MetadataSnapshot snapshot = new Store.MetadataSnapshot(unmodifiableMap(metaDataMap), emptyMap(), 0); - - final ShardId shardId = new ShardId("index", "_na_", 1); - DirectoryService directoryService = new LuceneManagedDirectoryService(random()); - Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId)); - for (String file : metaDataMap.keySet()) { - try (IndexOutput output = store.directory().createOutput(file, IOContext.DEFAULT)) { - BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024)); - output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length); - CodecUtil.writeFooter(output); - } - } - - store.verifyAfterCleanup(snapshot, snapshot); - deleteContent(store.directory()); - IOUtils.close(store); - } - public void testOnCloseCallback() throws IOException { final ShardId shardId = new ShardId(new Index(randomRealisticUnicodeOfCodepointLengthBetween(1, 10), "_na_"), randomIntBetween(0, 100)); DirectoryService directoryService = new LuceneManagedDirectoryService(random()); diff --git a/core/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java b/core/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java index 2b8ed9c3171..02573cc26b2 100644 --- a/core/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java +++ b/core/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java @@ -139,9 +139,9 @@ public class SuggestStatsIT extends ESIntegTestCase { private SuggestRequestBuilder addSuggestions(SuggestRequestBuilder request, int i) { for (int s = 0; s < randomIntBetween(2, 10); s++) { if (randomBoolean()) { - request.addSuggestion(new PhraseSuggestionBuilder("s" + s).field("f").text("test" + i + " test" + (i - 1))); + request.addSuggestion("s" + s, new PhraseSuggestionBuilder("f").text("test" + i + " test" + (i - 1))); } else { - request.addSuggestion(new TermSuggestionBuilder("s" + s).field("f").text("test" + i)); + request.addSuggestion("s" + s, new TermSuggestionBuilder("f").text("test" + i)); } } return request; diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 337d91356b9..984908ad9d6 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -1491,7 +1491,7 @@ public class TranslogTests extends ESTestCase { if (writtenOperations.size() != snapshot.totalOperations()) { for (int i = 0; i < threadCount; i++) { if (threadExceptions[i] != null) { - threadExceptions[i].printStackTrace(); + logger.info("Translog exception", threadExceptions[i]); } } } diff --git a/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java b/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java index 4853d59588b..905e45cf9f7 100644 --- 
a/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java +++ b/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java @@ -61,23 +61,23 @@ import static org.hamcrest.Matchers.nullValue; public class IndexLifecycleActionIT extends ESIntegTestCase { public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { Settings settings = settingsBuilder() + .put(indexSettings()) .put(SETTING_NUMBER_OF_SHARDS, 11) .put(SETTING_NUMBER_OF_REPLICAS, 1) - .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "0s") .build(); // start one server logger.info("Starting server1"); - final String server_1 = internalCluster().startNode(settings); + final String server_1 = internalCluster().startNode(); final String node1 = getLocalNodeId(server_1); logger.info("Creating index [test]"); - CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test")).actionGet(); + CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test").settings(settings)).actionGet(); assertThat(createIndexResponse.isAcknowledged(), equalTo(true)); logger.info("Running Cluster Health"); ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); @@ -87,12 +87,12 @@ public class IndexLifecycleActionIT extends ESIntegTestCase { logger.info("Starting server2"); // start another server - String server_2 = internalCluster().startNode(settings); + String server_2 = internalCluster().startNode(); // first wait for 2 nodes in the cluster logger.info("Running Cluster Health"); clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); final String node2 = getLocalNodeId(server_2); @@ -122,7 +122,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase { logger.info("Starting server3"); // start another server - String server_3 = internalCluster().startNode(settings); + String server_3 = internalCluster().startNode(); // first wait for 3 nodes in the cluster clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3")).actionGet(); @@ -171,7 +171,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase { // verify health logger.info("Running Cluster Health"); clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); diff --git a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java 
b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index afb9673508a..3316b52be2c 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -161,7 +161,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { public void testShardAdditionAndRemoval() { createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 0).build()); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); MockController controller = new MockController(Settings.builder() .put(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "4mb").build()); @@ -194,7 +194,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put(SETTING_NUMBER_OF_REPLICAS, 0).build()); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); MockController controller = new MockController(Settings.builder() .put(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "5mb") @@ -248,7 +248,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { public void testThrottling() throws Exception { createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 0).build()); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); MockController controller = new MockController(Settings.builder() .put(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "4mb").build()); @@ -281,7 +281,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { controller.assertNotThrottled(shard0); controller.assertThrottled(shard1); - System.out.println("TEST: now index more"); + logger.info("--> Indexing more data"); // More indexing to shard0 controller.simulateIndexing(shard0); @@ -316,7 +316,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService("index"); + IndexService indexService = indicesService.indexService(resolveIndex("index")); IndexShard shard = indexService.getShardOrNull(0); assertNotNull(shard); @@ -342,7 +342,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { @Override protected long getIndexBufferRAMBytesUsed(IndexShard shard) { return shard.getIndexBufferRAMBytesUsed(); - } + } @Override protected void writeIndexingBufferAsync(IndexShard shard) { diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java index e34e1d6bd6b..367f4cd46ce 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -49,8 +49,9 @@ public class 
IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas assertAcked(client().admin().indices().prepareCreate("test") .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)); ensureGreen(); - IndexMetaData metaData = indicesService.indexService("test").getMetaData(); - ShardRouting shardRouting = indicesService.indexService("test").getShard(0).routingEntry(); + Index idx = resolveIndex("test"); + IndexMetaData metaData = indicesService.indexService(idx).getMetaData(); + ShardRouting shardRouting = indicesService.indexService(idx).getShard(0).routingEntry(); final AtomicInteger counter = new AtomicInteger(1); IndexEventListener countingListener = new IndexEventListener() { @Override @@ -89,10 +90,11 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas counter.incrementAndGet(); } }; - indicesService.deleteIndex("test", "simon says"); + indicesService.deleteIndex(idx, "simon says"); try { NodeServicesProvider nodeServicesProvider = getInstanceFromNode(NodeServicesProvider.class); IndexService index = indicesService.createIndex(nodeServicesProvider, metaData, Arrays.asList(countingListener)); + idx = index.index(); ShardRouting newRouting = new ShardRouting(shardRouting); String nodeId = newRouting.currentNodeId(); ShardRoutingHelper.moveToUnassigned(newRouting, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "boom")); @@ -106,7 +108,7 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas ShardRoutingHelper.moveToStarted(newRouting); shard.updateRoutingEntry(newRouting, true); } finally { - indicesService.deleteIndex("test", "simon says"); + indicesService.deleteIndex(idx, "simon says"); } assertEquals(7, counter.get()); } diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index 1d7d833c500..8ea053a64ab 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -37,6 +37,7 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder; +import org.elasticsearch.action.percolate.MultiPercolateResponse; import org.elasticsearch.action.percolate.PercolateRequestBuilder; import org.elasticsearch.action.percolate.PercolateSourceBuilder; import org.elasticsearch.action.search.MultiSearchRequestBuilder; @@ -47,6 +48,7 @@ import org.elasticsearch.action.suggest.SuggestRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.index.IndexNotFoundException; @@ -62,8 +64,10 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; import static 
org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; public class IndicesOptionsIntegrationIT extends ESIntegTestCase { @@ -642,9 +646,12 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { return "a plugin that adds a dynamic tst setting"; } - private static final Setting INDEX_A = new Setting<>("index.a", "", Function.identity(), true, Setting.Scope.INDEX); - private static final Setting INDEX_C = new Setting<>("index.c", "", Function.identity(), true, Setting.Scope.INDEX); - private static final Setting INDEX_E = new Setting<>("index.e", "", Function.identity(), false, Setting.Scope.INDEX); + private static final Setting INDEX_A = + new Setting<>("index.a", "", Function.identity(), Property.Dynamic, Property.IndexScope); + private static final Setting INDEX_C = + new Setting<>("index.c", "", Function.identity(), Property.Dynamic, Property.IndexScope); + private static final Setting INDEX_E = + new Setting<>("index.e", "", Function.identity(), Property.IndexScope); public void onModule(SettingsModule module) { @@ -681,7 +688,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { try { verify(client().admin().indices().prepareUpdateSettings("barbaz").setSettings(Settings.builder().put("e", "f")), false); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("Can't update non dynamic settings[[index.e]] for open indices [[barbaz]]")); + assertThat(e.getMessage(), startsWith("Can't update non dynamic settings [[index.e]] for open indices [[barbaz")); } verify(client().admin().indices().prepareUpdateSettings("baz*").setSettings(Settings.builder().put("a", "b")), true); } @@ -749,7 +756,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { } private static SuggestRequestBuilder suggest(String... indices) { - return client().prepareSuggest(indices).addSuggestion(SuggestBuilders.termSuggestion("name").field("a")); + return client().prepareSuggest(indices).addSuggestion("name", SuggestBuilders.termSuggestion("a")); } private static GetAliasesRequestBuilder getAliases(String... 
indices) { @@ -788,7 +795,13 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { if (requestBuilder instanceof MultiSearchRequestBuilder) { MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get(); assertThat(multiSearchResponse.getResponses().length, equalTo(1)); + assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(true)); assertThat(multiSearchResponse.getResponses()[0].getResponse(), nullValue()); + } else if (requestBuilder instanceof MultiPercolateRequestBuilder) { + MultiPercolateResponse multiPercolateResponse = ((MultiPercolateRequestBuilder) requestBuilder).get(); + assertThat(multiPercolateResponse.getItems().length, equalTo(1)); + assertThat(multiPercolateResponse.getItems()[0].isFailure(), is(true)); + assertThat(multiPercolateResponse.getItems()[0].getResponse(), nullValue()); } else { try { requestBuilder.get(); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java index d6e248f1c94..5a4aa2e6b24 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java @@ -84,7 +84,7 @@ public class IndicesQueryCacheTests extends ESTestCase { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()); w.addDocument(new Document()); - DirectoryReader r = DirectoryReader.open(w, false); + DirectoryReader r = DirectoryReader.open(w); w.close(); ShardId shard = new ShardId("index", "_na_", 0); r = ElasticsearchDirectoryReader.wrap(r, shard); @@ -154,7 +154,7 @@ public class IndicesQueryCacheTests extends ESTestCase { Directory dir1 = newDirectory(); IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig()); w1.addDocument(new Document()); - DirectoryReader r1 = DirectoryReader.open(w1, false); + DirectoryReader r1 = DirectoryReader.open(w1); w1.close(); ShardId shard1 = new ShardId("index", "_na_", 0); r1 = ElasticsearchDirectoryReader.wrap(r1, shard1); @@ -164,7 +164,7 @@ public class IndicesQueryCacheTests extends ESTestCase { Directory dir2 = newDirectory(); IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig()); w2.addDocument(new Document()); - DirectoryReader r2 = DirectoryReader.open(w2, false); + DirectoryReader r2 = DirectoryReader.open(w2); w2.close(); ShardId shard2 = new ShardId("index", "_na_", 1); r2 = ElasticsearchDirectoryReader.wrap(r2, shard2); @@ -279,7 +279,7 @@ public class IndicesQueryCacheTests extends ESTestCase { Directory dir1 = newDirectory(); IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig()); w1.addDocument(new Document()); - DirectoryReader r1 = DirectoryReader.open(w1, false); + DirectoryReader r1 = DirectoryReader.open(w1); w1.close(); ShardId shard1 = new ShardId("index", "_na_", 0); r1 = ElasticsearchDirectoryReader.wrap(r1, shard1); @@ -289,7 +289,7 @@ public class IndicesQueryCacheTests extends ESTestCase { Directory dir2 = newDirectory(); IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig()); w2.addDocument(new Document()); - DirectoryReader r2 = DirectoryReader.open(w2, false); + DirectoryReader r2 = DirectoryReader.open(w2); w2.close(); ShardId shard2 = new ShardId("index", "_na_", 1); r2 = ElasticsearchDirectoryReader.wrap(r2, shard2); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 
1b2ad9c0a1e..94c41e5c84e 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -21,18 +21,19 @@ package org.elasticsearch.indices; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.indices.IndicesRequestCache; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.test.ESIntegTestCase; import org.joda.time.DateTimeZone; - import java.util.List; import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; public class IndicesRequestCacheIT extends ESIntegTestCase { @@ -80,4 +81,156 @@ public class IndicesRequestCacheIT extends ESIntegTestCase { } } + public void testQueryRewrite() throws Exception { + assertAcked(client().admin().indices().prepareCreate("index").addMapping("type", "s", "type=text") + .setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true, + IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5, + IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .get()); + indexRandom(true, client().prepareIndex("index", "type", "1").setRouting("1").setSource("s", "a"), + client().prepareIndex("index", "type", "2").setRouting("1").setSource("s", "b"), + client().prepareIndex("index", "type", "3").setRouting("1").setSource("s", "c"), + client().prepareIndex("index", "type", "4").setRouting("2").setSource("s", "d"), + client().prepareIndex("index", "type", "5").setRouting("2").setSource("s", "e"), + client().prepareIndex("index", "type", "6").setRouting("2").setSource("s", "f"), + client().prepareIndex("index", "type", "7").setRouting("3").setSource("s", "g"), + client().prepareIndex("index", "type", "8").setRouting("3").setSource("s", "h"), + client().prepareIndex("index", "type", "9").setRouting("3").setSource("s", "i")); + ensureSearchable("index"); + + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(0L)); + + final SearchResponse r1 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("a").lte("g")).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(7L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(5L)); + + final SearchResponse r2 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + 
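// same query shifted by one term: shards whose rewritten query is unchanged are answered from the request cache (3 hits below), only the remaining shards add new misses + 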
.setQuery(QueryBuilders.rangeQuery("s").gte("b").lte("h")).get(); + assertSearchResponse(r2); + assertThat(r2.getHits().getTotalHits(), equalTo(7L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(3L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(7L)); + + final SearchResponse r3 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("c").lte("i")).get(); + assertSearchResponse(r3); + assertThat(r3.getHits().getTotalHits(), equalTo(7L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(6L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(9L)); + } + + public void testQueryRewriteMissingValues() throws Exception { + assertAcked(client().admin().indices().prepareCreate("index").addMapping("type", "s", "type=text") + .setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true, IndexMetaData.SETTING_NUMBER_OF_SHARDS, + 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .get()); + indexRandom(true, client().prepareIndex("index", "type", "1").setSource("s", "a"), + client().prepareIndex("index", "type", "2").setSource("s", "b"), + client().prepareIndex("index", "type", "3").setSource("s", "c"), + client().prepareIndex("index", "type", "4").setSource("s", "d"), + client().prepareIndex("index", "type", "5").setSource("s", "e"), + client().prepareIndex("index", "type", "6").setSource("s", "f"), + client().prepareIndex("index", "type", "7").setSource("other", "value"), + client().prepareIndex("index", "type", "8").setSource("s", "h"), + client().prepareIndex("index", "type", "9").setSource("s", "i")); + ensureSearchable("index"); + + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(0L)); + + final SearchResponse r1 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("a").lte("j")).get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(8L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + + final SearchResponse r2 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("a").lte("j")).get(); + assertSearchResponse(r2); + assertThat(r2.getHits().getTotalHits(), equalTo(8L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + + final SearchResponse r3 = 
client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("s").gte("a").lte("j")).get(); + assertSearchResponse(r3); + assertThat(r3.getHits().getTotalHits(), equalTo(8L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(2L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + } + + public void testQueryRewriteDates() throws Exception { + assertAcked(client().admin().indices().prepareCreate("index").addMapping("type", "d", "type=date") + .setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true, + IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, + IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .get()); + indexRandom(true, client().prepareIndex("index", "type", "1").setSource("d", "2014-01-01T00:00:00"), + client().prepareIndex("index", "type", "2").setSource("d", "2014-02-01T00:00:00"), + client().prepareIndex("index", "type", "3").setSource("d", "2014-03-01T00:00:00"), + client().prepareIndex("index", "type", "4").setSource("d", "2014-04-01T00:00:00"), + client().prepareIndex("index", "type", "5").setSource("d", "2014-05-01T00:00:00"), + client().prepareIndex("index", "type", "6").setSource("d", "2014-06-01T00:00:00"), + client().prepareIndex("index", "type", "7").setSource("d", "2014-07-01T00:00:00"), + client().prepareIndex("index", "type", "8").setSource("d", "2014-08-01T00:00:00"), + client().prepareIndex("index", "type", "9").setSource("d", "2014-09-01T00:00:00")); + ensureSearchable("index"); + + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(0L)); + + final SearchResponse r1 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) + .get(); + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(9L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + + final SearchResponse r2 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) + .get(); + assertSearchResponse(r2); + assertThat(r2.getHits().getTotalHits(), equalTo(9L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(1L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + + final SearchResponse r3 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setQuery(QueryBuilders.rangeQuery("d").gte("2013-01-01T00:00:00").lte("now")) + .get(); + assertSearchResponse(r3); + assertThat(r3.getHits().getTotalHits(), equalTo(9L)); + 
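// repeating the identical "now"-bounded request keeps hitting the cache: the hit count climbs to 2 while the miss count stays at the initial 1 + 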
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(2L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(1L)); + } + } diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java index bd48a388f34..646d9651436 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java @@ -54,7 +54,7 @@ public class IndicesRequestCacheTests extends ESTestCase { IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), + DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); AtomicBoolean indexShard = new AtomicBoolean(true); @@ -107,7 +107,7 @@ public class IndicesRequestCacheTests extends ESTestCase { IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), + DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); @@ -144,12 +144,12 @@ public class IndicesRequestCacheTests extends ESTestCase { IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "bar", 1)); + DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); - DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "bar", 1)); + DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); // initial cache @@ -237,13 +237,13 @@ public class IndicesRequestCacheTests extends ESTestCase { IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), + DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); - DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), + DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new 
ShardId("foo", "bar", 1)); TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); @@ -263,18 +263,18 @@ public class IndicesRequestCacheTests extends ESTestCase { IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), + DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); - DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), + DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "baz")); - DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), + DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TestEntity thirddEntity = new TestEntity(requestCacheStats, thirdReader, indexShard, 0); @@ -282,7 +282,7 @@ public class IndicesRequestCacheTests extends ESTestCase { assertEquals("foo", value1.toUtf8()); BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); assertEquals("bar", value2.toUtf8()); - logger.info(requestCacheStats.stats().getMemorySize().toString()); + logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize()); BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes()); assertEquals("baz", value3.toUtf8()); assertEquals(2, cache.count()); @@ -299,18 +299,18 @@ public class IndicesRequestCacheTests extends ESTestCase { IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); - DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), + DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); - DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), + DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "baz")); - DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), + DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); AtomicBoolean differentIdentity = new AtomicBoolean(true); TestEntity thirddEntity = new TestEntity(requestCacheStats, thirdReader, differentIdentity, 0); @@ -319,7 +319,7 @@ public class IndicesRequestCacheTests extends ESTestCase { assertEquals("foo", value1.toUtf8()); BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, 
termQuery.buildAsBytes()); assertEquals("bar", value2.toUtf8()); - logger.info(requestCacheStats.stats().getMemorySize().toString()); + logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize()); BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes()); assertEquals("baz", value3.toUtf8()); assertEquals(3, cache.count()); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index e9f1f6be518..344c40a92ff 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -20,14 +20,13 @@ package org.elasticsearch.indices; import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.GatewayMetaState; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; @@ -73,12 +72,16 @@ public class IndicesServiceTests extends ESSingleNodeTestCase { IndexMetaData meta = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas( 1).build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", meta.getSettings()); - assertFalse("no shard location", indicesService.canDeleteShardContent(new ShardId("test", "_na_", 0), indexSettings)); + ShardId shardId = new ShardId(meta.getIndex(), 0); + assertFalse("no shard location", indicesService.canDeleteShardContent(shardId, indexSettings)); IndexService test = createIndex("test"); + shardId = new ShardId(test.index(), 0); assertTrue(test.hasShard(0)); - assertFalse("shard is allocated", indicesService.canDeleteShardContent(new ShardId("test", "_na_", 0), indexSettings)); + assertFalse("shard is allocated", indicesService.canDeleteShardContent(shardId, test.getIndexSettings())); test.removeShard(0, "boom"); - assertTrue("shard is removed", indicesService.canDeleteShardContent(new ShardId("test", "_na_", 0), indexSettings)); + assertTrue("shard is removed", indicesService.canDeleteShardContent(shardId, test.getIndexSettings())); + ShardId notAllocated = new ShardId(test.index(), 100); + assertFalse("shard that was never on this node should NOT be deletable", indicesService.canDeleteShardContent(notAllocated, test.getIndexSettings())); } public void testDeleteIndexStore() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java index 09371c38dab..4597765c11c 100644 --- a/core/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java @@ -43,7 +43,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) 
-@ESBackcompatTestCase.CompatibilityVersion(version = Version.V_1_2_0_ID) // we throw an exception if we create an index with _field_names that is 1.3 public class PreBuiltAnalyzerIntegrationIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { diff --git a/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java b/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java index 92b96d8e47d..23a197dbab6 100644 --- a/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java +++ b/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java @@ -19,15 +19,8 @@ package org.elasticsearch.indices.analyze; import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder; import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.core.IsNull; @@ -196,53 +189,6 @@ public class AnalyzeActionIT extends ESIntegTestCase { return randomBoolean() ? "test" : "alias"; } - public void testParseXContentForAnalyzeReuqest() throws Exception { - BytesReference content = XContentFactory.jsonBuilder() - .startObject() - .field("text", "THIS IS A TEST") - .field("tokenizer", "keyword") - .array("filters", "lowercase") - .endObject().bytes(); - - AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); - - RestAnalyzeAction.buildFromContent(content, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)); - - assertThat(analyzeRequest.text().length, equalTo(1)); - assertThat(analyzeRequest.text(), equalTo(new String[]{"THIS IS A TEST"})); - assertThat(analyzeRequest.tokenizer(), equalTo("keyword")); - assertThat(analyzeRequest.tokenFilters(), equalTo(new String[]{"lowercase"})); - } - - public void testParseXContentForAnalyzeRequestWithInvalidJsonThrowsException() throws Exception { - AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); - - try { - RestAnalyzeAction.buildFromContent(new BytesArray("{invalid_json}"), analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)); - fail("shouldn't get here"); - } catch (Exception e) { - assertThat(e, instanceOf(IllegalArgumentException.class)); - assertThat(e.getMessage(), equalTo("Failed to parse request body")); - } - } - - public void testParseXContentForAnalyzeRequestWithUnknownParamThrowsException() throws Exception { - AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); - BytesReference invalidContent =XContentFactory.jsonBuilder() - .startObject() - .field("text", "THIS IS A TEST") - .field("unknown", "keyword") - .endObject().bytes(); - - try { - RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)); - fail("shouldn't get here"); - } catch (Exception e) { - assertThat(e, instanceOf(IllegalArgumentException.class)); - assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]")); - } - } - public void testAnalyzerWithMultiValues() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias"))); ensureGreen(); diff --git 
a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java index 239cb7a9096..da1bb7ae303 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.indices.flush; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.index.IndexService; @@ -42,7 +42,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testModificationPreventsFlushing() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -86,7 +86,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testSingleShardSuccess() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -106,7 +106,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testSyncFailsIfOperationIsInFlight() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -126,7 +126,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testSyncFailsOnIndexClosedOrMissing() throws InterruptedException { createIndex("test"); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -159,7 +159,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testFailAfterIntermediateCommit() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService 
flushService = getInstanceFromNode(SyncedFlushService.class); @@ -192,7 +192,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testFailWhenCommitIsMissing() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index 0951f3c46df..8e064f46e12 100644 --- a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -156,15 +156,15 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase { public void testUpdateMappingWithNormsConflicts() throws Exception { client().admin().indices().prepareCreate("test") - .addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"text\", \"norms\": { \"enabled\": false }}}}}") + .addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"text\", \"norms\": false }}}}") .execute().actionGet(); try { client().admin().indices().preparePutMapping("test").setType("type") - .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"text\", \"norms\": { \"enabled\": true }}}}}").execute() + .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"text\", \"norms\": true }}}}").execute() .actionGet(); fail("Expected MergeMappingException"); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("mapper [body] has different [omit_norms]")); + assertThat(e.getMessage(), containsString("mapper [body] has different [norms]")); } } diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java index 35ed7a2c657..6bea3217894 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.client.Requests; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.unit.TimeValue; @@ -197,8 +198,10 @@ public class RandomExceptionCircuitBreakerIT extends ESIntegTestCase { // TODO: Generalize this class and add it as a utility public static class RandomExceptionDirectoryReaderWrapper extends MockEngineSupport.DirectoryReaderWrapper { - public static final Setting<Double> EXCEPTION_TOP_LEVEL_RATIO_SETTING = Setting.doubleSetting(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d, 0.0d, false, Setting.Scope.INDEX); - public static final Setting<Double> EXCEPTION_LOW_LEVEL_RATIO_SETTING = Setting.doubleSetting(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d, 0.0d, false, Setting.Scope.INDEX); + public static final Setting<Double>
EXCEPTION_TOP_LEVEL_RATIO_SETTING = + Setting.doubleSetting(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d, 0.0d, Property.IndexScope); + public static final Setting<Double> EXCEPTION_LOW_LEVEL_RATIO_SETTING = + Setting.doubleSetting(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d, 0.0d, Property.IndexScope); public static class TestPlugin extends Plugin { @Override public String name() { diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 155032f1d8c..140ff153b9f 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -30,13 +30,14 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.index.Index; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesService; @@ -261,14 +262,16 @@ public class IndexRecoveryIT extends ESIntegTestCase { .execute().actionGet().getState(); logger.info("--> waiting for recovery to start both on source and target"); + final Index index = resolveIndex(INDEX_NAME); assertBusy(new Runnable() { @Override public void run() { + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeA); - assertThat(indicesService.indexServiceSafe(INDEX_NAME).getShard(0).recoveryStats().currentAsSource(), + assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsSource(), equalTo(1)); indicesService = internalCluster().getInstance(IndicesService.class, nodeB); - assertThat(indicesService.indexServiceSafe(INDEX_NAME).getShard(0).recoveryStats().currentAsTarget(), + assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsTarget(), equalTo(1)); } }); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index b5f744ddc23..b69d1218546 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -94,7 +94,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { @Override public void close() throws IOException { super.close(); - store.directory().sync(Collections.singleton(md.name())); // sync otherwise MDW will mess with it + targetStore.directory().sync(Collections.singleton(md.name())); // sync otherwise MDW will mess with it } }; } catch (IOException e) { diff --git a/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java b/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java index b257e3bcd5e..1a2f7e4ba18 
100644 --- a/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java +++ b/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java @@ -48,7 +48,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { assertAcked(prepareCreate("test", 2)); logger.info("Running Cluster Health"); ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); NumShards numShards = getNumShards("test"); @@ -75,7 +75,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 2)).execute().actionGet()); logger.info("Running Cluster Health"); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries)); @@ -88,7 +88,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { logger.info("Running Cluster Health"); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes(">=3").execute().actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries)); @@ -106,7 +106,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { logger.info("Running Cluster Health"); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes(">=3").execute().actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries)); @@ -128,7 +128,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { logger.info("--> running cluster health"); ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet(); - logger.info("--> done cluster health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); 
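The recurring logging change in these test hunks swaps string concatenation for parameterized {} placeholders, so the message is only rendered when the log statement actually fires. A minimal sketch of the two forms, assuming the Loggers/ESLogger helpers from this era of the codebase and a status value in scope:

    import org.elasticsearch.common.logging.ESLogger;
    import org.elasticsearch.common.logging.Loggers;

    class LoggingSketch {
        private static final ESLogger logger = Loggers.getLogger(LoggingSketch.class);

        static void report(Object status) {
            // Eager: the argument string is concatenated even when INFO is disabled.
            logger.info("Done Cluster Health, status " + status);
            // Lazy: the {} placeholder is filled in only if the message is logged.
            logger.info("Done Cluster Health, status {}", status);
        }
    }
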
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries)); @@ -140,7 +140,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { logger.info("--> running cluster health"); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).setWaitForNodes(">=3").execute().actionGet(); - logger.info("--> done cluster health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries)); @@ -153,7 +153,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { logger.info("--> running cluster health"); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).setWaitForNodes(">=2").execute().actionGet(); - logger.info("--> done cluster health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries)); @@ -166,7 +166,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { logger.info("--> running cluster health"); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=1").setWaitForActiveShards(numShards.numPrimaries).execute().actionGet(); - logger.info("--> done cluster health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries)); @@ -183,7 +183,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { logger.info("--> running cluster health"); ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet(); - logger.info("--> done cluster health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries)); @@ -195,7 +195,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { logger.info("--> running cluster health"); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).execute().actionGet(); - logger.info("--> done cluster health, status " + clusterHealth.getStatus()); + 
logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries)); @@ -208,7 +208,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { logger.info("--> running cluster health"); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=2").setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet(); - logger.info("--> done cluster health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries)); @@ -221,7 +221,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { logger.info("--> running cluster health"); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes(">=1").setWaitForActiveShards(numShards.numPrimaries).execute().actionGet(); - logger.info("--> done cluster health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries)); @@ -237,7 +237,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { logger.info("--> running cluster health"); ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).execute().actionGet(); - logger.info("--> done cluster health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries)); @@ -253,7 +253,7 @@ public class UpdateNumberOfReplicasIT extends ESIntegTestCase { logger.info("--> running cluster health"); clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 4).execute().actionGet(); - logger.info("--> done cluster health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries)); diff --git a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index 78d5e2203f5..642d646fe9e 100644 --- a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ 
b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -65,7 +65,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { IndexMetaData indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test"); assertEquals(indexMetaData.getSettings().get("index.refresh_interval"), "-1"); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { - IndexService indexService = service.indexService("test"); + IndexService indexService = service.indexService(resolveIndex("test")); if (indexService != null) { assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), -1); assertEquals(indexService.getIndexSettings().getFlushThresholdSize().bytes(), 1024); @@ -79,7 +79,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test"); assertNull(indexMetaData.getSettings().get("index.refresh_interval")); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { - IndexService indexService = service.indexService("test"); + IndexService indexService = service.indexService(resolveIndex("test")); if (indexService != null) { assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), 1000); assertEquals(indexService.getIndexSettings().getFlushThresholdSize().bytes(), 1024); @@ -137,7 +137,8 @@ public class UpdateSettingsIT extends ESIntegTestCase { .execute().actionGet(); fail("can't change number of replicas on a closed index"); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Can't update [index.number_of_replicas] on closed indices [[test]] - can leave index in an unopenable state"); + assertTrue(ex.getMessage(), ex.getMessage().startsWith("Can't update [index.number_of_replicas] on closed indices [[test/")); + assertTrue(ex.getMessage(), ex.getMessage().endsWith("]] - can leave index in an unopenable state")); // expected } client().admin().indices().prepareUpdateSettings("test") diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 8a9fa191854..49819e1180a 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.cluster.ClusterInfo; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -40,6 +39,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; @@ -47,6 +47,7 @@ import org.elasticsearch.common.unit.TimeValue; import 
org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; @@ -376,12 +377,13 @@ public class RareClusterStateIT extends ESIntegTestCase { putMappingResponse.set(e); } }); + final Index index = resolveIndex("index"); // Wait for mappings to be available on master assertBusy(new Runnable() { @Override public void run() { final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master); - final IndexService indexService = indicesService.indexServiceSafe("index"); + final IndexService indexService = indicesService.indexServiceSafe(index); assertNotNull(indexService); final MapperService mapperService = indexService.mapperService(); DocumentMapper mapper = mapperService.documentMapper("type"); diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index e29ad3e081a..b7cb64a7d2f 100644 --- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices.stats; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; -import org.apache.lucene.util.Version; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; @@ -59,6 +58,7 @@ import java.io.IOException; import java.util.EnumSet; import java.util.Random; +import static org.elasticsearch.cluster.metadata.IndexMetaData.PROTO; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -80,13 +80,23 @@ public class IndexStatsIT extends ESIntegTestCase { //Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often. 
Thread.sleep for 60s is bad return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) .put(IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), "1ms") - .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true) - .put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), IndexModule.INDEX_QUERY_CACHE) .build(); } + @Override + public Settings indexSettings() { + return Settings.settingsBuilder().put(super.indexSettings()) + .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true) + .put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), IndexModule.INDEX_QUERY_CACHE) + .build(); + } + + private Settings.Builder settingsBuilder() { + return Settings.builder().put(indexSettings()); + } + public void testFieldDataStats() { - client().admin().indices().prepareCreate("test").setSettings(Settings.settingsBuilder().put("index.number_of_shards", 2)).execute().actionGet(); + client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 2)).execute().actionGet(); ensureGreen(); client().prepareIndex("test", "type", "1").setSource("field", "value1", "field2", "value1").execute().actionGet(); client().prepareIndex("test", "type", "2").setSource("field", "value2", "field2", "value2").execute().actionGet(); @@ -131,7 +141,7 @@ public class IndexStatsIT extends ESIntegTestCase { public void testClearAllCaches() throws Exception { client().admin().indices().prepareCreate("test") - .setSettings(Settings.settingsBuilder().put("index.number_of_replicas", 0).put("index.number_of_shards", 2)) + .setSettings(settingsBuilder().put("index.number_of_replicas", 0).put("index.number_of_shards", 2)) .execute().actionGet(); ensureGreen(); client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); @@ -277,7 +287,7 @@ public class IndexStatsIT extends ESIntegTestCase { public void testNonThrottleStats() throws Exception { assertAcked(prepareCreate("test") - .setSettings(Settings.builder() + .setSettings(settingsBuilder() .put(IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING.getKey(), "merge") .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") @@ -309,7 +319,7 @@ public class IndexStatsIT extends ESIntegTestCase { public void testThrottleStats() throws Exception { assertAcked(prepareCreate("test") - .setSettings(Settings.builder() + .setSettings(settingsBuilder() .put(IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING.getKey(), "merge") .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") @@ -542,7 +552,6 @@ public class IndexStatsIT extends ESIntegTestCase { assertThat(stats.getTotal().getSegments(), notNullValue()); assertThat(stats.getTotal().getSegments().getCount(), equalTo((long) test1.totalNumShards)); - assumeTrue("test doesn't work with 4.6.0", org.elasticsearch.Version.CURRENT.luceneVersion != Version.LUCENE_4_6_0); assertThat(stats.getTotal().getSegments().getMemoryInBytes(), greaterThan(0L)); } @@ -652,7 +661,7 @@ public class IndexStatsIT extends ESIntegTestCase { public void testFlagOrdinalOrder() { Flag[] flags = new Flag[]{Flag.Store, Flag.Indexing, Flag.Get, Flag.Search, Flag.Merge, Flag.Flush, Flag.Refresh, - Flag.QueryCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.Percolate, Flag.Completion, Flag.Segments, + Flag.QueryCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.PercolatorCache, Flag.Completion, Flag.Segments, Flag.Translog, Flag.Suggest, Flag.RequestCache, Flag.Recovery}; 
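The IndexStatsIT hunk above moves the query-cache flags out of nodeSettings() and into an indexSettings() override: with the new settings infrastructure these are index-scoped settings, registered with Setting.Property.IndexScope rather than the old boolean-plus-Setting.Scope overload (compare the RandomExceptionCircuitBreakerIT hunk earlier). A sketch of the new-style declaration, using a hypothetical key name for illustration; the call shape mirrors the migrated declarations in this diff:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;

    class IndexScopedSettingSketch {
        // Hypothetical key; scope and flags are now expressed as Property varargs.
        static final Setting<Double> EXCEPTION_RATIO_SETTING =
            Setting.doubleSetting("index.test.exception.ratio", 0.1d, 0.0d, Property.IndexScope);
    }
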
assertThat(flags.length, equalTo(Flag.values().length)); @@ -893,7 +902,7 @@ public class IndexStatsIT extends ESIntegTestCase { case Warmer: builder.setWarmer(set); break; - case Percolate: + case PercolatorCache: builder.setPercolate(set); break; case Completion: @@ -944,8 +953,8 @@ public class IndexStatsIT extends ESIntegTestCase { return response.getStore() != null; case Warmer: return response.getWarmer() != null; - case Percolate: - return response.getPercolate() != null; + case PercolatorCache: + return response.getPercolatorCache() != null; case Completion: return response.getCompletion() != null; case Segments: @@ -990,7 +999,7 @@ public class IndexStatsIT extends ESIntegTestCase { } public void testFilterCacheStats() throws Exception { - assertAcked(prepareCreate("index").setSettings("number_of_replicas", 0).get()); + assertAcked(prepareCreate("index").setSettings(Settings.builder().put(indexSettings()).put("number_of_replicas", 0).build()).get()); indexRandom(true, client().prepareIndex("index", "type", "1").setSource("foo", "bar"), client().prepareIndex("index", "type", "2").setSource("foo", "baz")); diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index 948c005bf33..84d86324fee 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -21,7 +21,6 @@ package org.elasticsearch.indices.store; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -36,6 +35,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; @@ -112,12 +112,14 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)) ); ensureGreen("test"); + ClusterState state = client().admin().cluster().prepareState().get().getState(); + Index index = state.metaData().index("test").getIndex(); logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2"); - assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true)); - assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true)); - assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true)); - assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(true)); logger.info("--> starting node server3"); final String node_3 = 
internalCluster().startNode(Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false)); @@ -128,12 +130,12 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { .get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); - assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true)); - assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true)); - assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true)); - assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true)); - assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(false)); - assertThat(Files.exists(indexDirectory(node_3, "test")), equalTo(false)); + assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_3, index, 0)), equalTo(false)); + assertThat(Files.exists(indexDirectory(node_3, index)), equalTo(false)); logger.info("--> move shard from node_1 to node_3, and wait for relocation to finish"); @@ -161,12 +163,12 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { .get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); - assertThat(waitForShardDeletion(node_1, "test", 0), equalTo(false)); - assertThat(waitForIndexDeletion(node_1, "test"), equalTo(false)); - assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true)); - assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true)); - assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(true)); - assertThat(Files.exists(indexDirectory(node_3, "test")), equalTo(true)); + assertThat(waitForShardDeletion(node_1, index, 0), equalTo(false)); + assertThat(waitForIndexDeletion(node_1, index), equalTo(false)); + assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_3, index, 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_3, index)), equalTo(true)); } @@ -180,16 +182,18 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)) ); ensureGreen("test"); - assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true)); - assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true)); + ClusterState state = client().admin().cluster().prepareState().get().getState(); + Index index = state.metaData().index("test").getIndex(); + assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true)); final String node_2 = internalCluster().startDataOnlyNode(Settings.builder().build()); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); - assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true)); - assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true)); - assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(false)); - assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(false)); + assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true)); + 
assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(false)); + assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(false)); // add a transport delegate that will prevent the shard active request to succeed the first time after relocation has finished. // node_1 will then wait for the next cluster state change before it tries a next attempt to delete the shard. @@ -220,14 +224,14 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // it must still delete the shard, even if it cannot find it anymore in indicesservice client().admin().indices().prepareDelete("test").get(); - assertThat(waitForShardDeletion(node_1, "test", 0), equalTo(false)); - assertThat(waitForIndexDeletion(node_1, "test"), equalTo(false)); - assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(false)); - assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(false)); - assertThat(waitForShardDeletion(node_2, "test", 0), equalTo(false)); - assertThat(waitForIndexDeletion(node_2, "test"), equalTo(false)); - assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(false)); - assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(false)); + assertThat(waitForShardDeletion(node_1, index, 0), equalTo(false)); + assertThat(waitForIndexDeletion(node_1, index), equalTo(false)); + assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(false)); + assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(false)); + assertThat(waitForShardDeletion(node_2, index, 0), equalTo(false)); + assertThat(waitForIndexDeletion(node_2, index), equalTo(false)); + assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(false)); + assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(false)); } public void testShardsCleanup() throws Exception { @@ -241,9 +245,11 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { ); ensureGreen("test"); + ClusterState state = client().admin().cluster().prepareState().get().getState(); + Index index = state.metaData().index("test").getIndex(); logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2"); - assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true)); - assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(true)); logger.info("--> starting node server3"); String node_3 = internalCluster().startNode(); @@ -255,9 +261,9 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { assertThat(clusterHealth.isTimedOut(), equalTo(false)); logger.info("--> making sure that shard is not allocated on server3"); - assertThat(waitForShardDeletion(node_3, "test", 0), equalTo(false)); + assertThat(waitForShardDeletion(node_3, index, 0), equalTo(false)); - Path server2Shard = shardDirectory(node_2, "test", 0); + Path server2Shard = shardDirectory(node_2, index, 0); logger.info("--> stopping node " + node_2); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_2)); @@ -268,14 +274,14 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { .setWaitForRelocatingShards(0) .get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); - logger.info("--> done cluster_health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); assertThat(Files.exists(server2Shard), 
equalTo(true)); logger.info("--> making sure that shard and its replica exist on server1, server2 and server3"); - assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); assertThat(Files.exists(server2Shard), equalTo(true)); - assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_3, index, 0)), equalTo(true)); logger.info("--> starting node node_4"); final String node_4 = internalCluster().startNode(); @@ -284,9 +290,9 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { ensureGreen(); logger.info("--> making sure that shard and its replica are allocated on server1 and server3 but not on server2"); - assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true)); - assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(true)); - assertThat(waitForShardDeletion(node_4, "test", 0), equalTo(false)); + assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_3, index, 0)), equalTo(true)); + assertThat(waitForShardDeletion(node_4, index, 0), equalTo(false)); } public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception { @@ -303,6 +309,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { final String node4 = nodesFutures.get().get(3); assertAcked(prepareCreate("test").setSettings(Settings.builder() + .put(indexSettings()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_name", node4) @@ -336,10 +343,11 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // we have to do this in two steps as we now do async shard fetching before assigning, so the change to the // allocation filtering may not have immediate effect // TODO: we should add an easier to do this. It's too much of a song and dance.. 
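A pattern worth noting in the IndicesStoreIntegrationIT changes: helpers that used to take a bare index name now take a concrete Index (name plus UUID), obtained either from cluster state metadata or from the resolveIndex(...) test helper, and ShardId now pairs an Index with a shard number instead of the old (name, "_na_", id) placeholder form. A small sketch of the lookup under those assumptions (the helper class is hypothetical):

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.index.Index;
    import org.elasticsearch.index.shard.ShardId;

    class IndexLookupSketch {
        // Resolve the concrete Index (name + UUID) from cluster state metadata,
        // as the tests above do with client().admin().cluster().prepareState().
        static ShardId shardZero(ClusterState state, String indexName) {
            Index index = state.metaData().index(indexName).getIndex();
            return new ShardId(index, 0); // replaces new ShardId(indexName, "_na_", 0)
        }
    }
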
+ Index index = resolveIndex("test"); assertBusy(new Runnable() { @Override public void run() { - assertTrue(internalCluster().getInstance(IndicesService.class, node4).hasIndex("test")); + assertTrue(internalCluster().getInstance(IndicesService.class, node4).hasIndex(index)); } }); @@ -425,30 +433,30 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { waitNoPendingTasksOnAll(); logger.info("Checking if shards aren't removed"); for (int shard : node2Shards) { - assertTrue(waitForShardDeletion(nonMasterNode, "test", shard)); + assertTrue(waitForShardDeletion(nonMasterNode, index, shard)); } } - private Path indexDirectory(String server, String index) { + private Path indexDirectory(String server, Index index) { NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server); final Path[] paths = env.indexPaths(index); assert paths.length == 1; return paths[0]; } - private Path shardDirectory(String server, String index, int shard) { + private Path shardDirectory(String server, Index index, int shard) { NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server); - final Path[] paths = env.availableShardPaths(new ShardId(index, "_na_", shard)); + final Path[] paths = env.availableShardPaths(new ShardId(index, shard)); assert paths.length == 1; return paths[0]; } - private boolean waitForShardDeletion(final String server, final String index, final int shard) throws InterruptedException { + private boolean waitForShardDeletion(final String server, final Index index, final int shard) throws InterruptedException { awaitBusy(() -> !Files.exists(shardDirectory(server, index, shard))); return Files.exists(shardDirectory(server, index, shard)); } - private boolean waitForIndexDeletion(final String server, final String index) throws InterruptedException { + private boolean waitForIndexDeletion(final String server, final Index index) throws InterruptedException { awaitBusy(() -> !Files.exists(indexDirectory(server, index))); return Files.exists(indexDirectory(server, index)); } diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java index e909af62668..badcbde193b 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java @@ -30,19 +30,25 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.cluster.TestClusterService; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.AfterClass; import org.junit.Before; +import org.junit.BeforeClass; import java.util.Arrays; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.Version.CURRENT; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.VersionUtils.randomVersion; /** @@ -57,13 +63,35 @@ 
public class IndicesStoreTests extends ESTestCase { NOT_STARTED_STATES = set.toArray(new ShardRoutingState[set.size()]); } + private static ThreadPool threadPool; + private IndicesStore indicesStore; private DiscoveryNode localNode; + private ClusterService clusterService; + + @BeforeClass + public static void beforeClass() { + threadPool = new ThreadPool("ShardReplicationTests"); + } + + @AfterClass + public static void afterClass() { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; + } + @Before public void before() { localNode = new DiscoveryNode("abc", new LocalTransportAddress("abc"), Version.CURRENT); - indicesStore = new IndicesStore(Settings.EMPTY, null, new TestClusterService(), new TransportService(null, null), null); + clusterService = createClusterService(threadPool); + indicesStore = new IndicesStore(Settings.EMPTY, null, clusterService, new TransportService(null, null), null); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); } public void testShardCanBeDeletedNoShardRouting() throws Exception { @@ -87,7 +115,7 @@ public class IndicesStoreTests extends ESTestCase { for (int i = 0; i < numShards; i++) { int unStartedShard = randomInt(numReplicas); - for (int j=0; j <= numReplicas; j++) { + for (int j = 0; j <= numReplicas; j++) { ShardRoutingState state; if (j == unStartedShard) { state = randomFrom(NOT_STARTED_STATES); diff --git a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index cce687fcec3..d14a411c332 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -131,7 +131,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .addField("field1").addField("field2") .execute().actionGet(); if (searchResponse.getFailedShards() > 0) { - logger.warn("failed search " + Arrays.toString(searchResponse.getShardFailures())); + logger.warn("failed search {}", Arrays.toString(searchResponse.getShardFailures())); } assertHitCount(searchResponse, 1); assertThat(searchResponse.getHits().getAt(0).field("field1").value().toString(), equalTo("value1")); diff --git a/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java new file mode 100644 index 00000000000..a415b0992a7 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.ingest; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ingest.WritePipelineResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.ingest.core.Pipeline; +import org.elasticsearch.node.service.NodeService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +@ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST) +public class IngestProcessorNotInstalledOnAllNodesIT extends ESIntegTestCase { + + private final BytesReference pipelineSource; + private volatile boolean installPlugin; + + public IngestProcessorNotInstalledOnAllNodesIT() throws IOException { + pipelineSource = jsonBuilder().startObject() + .startArray("processors") + .startObject() + .startObject("test") + .endObject() + .endObject() + .endArray() + .endObject().bytes(); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return installPlugin ? pluginList(IngestClientIT.IngestPlugin.class) : Collections.emptyList(); + } + + @Override + protected Collection<Class<? extends Plugin>> getMockPlugins() { + return Collections.singletonList(TestSeedPlugin.class); + } + + public void testFailPipelineCreation() throws Exception { + installPlugin = true; + String node1 = internalCluster().startNode(); + installPlugin = false; + String node2 = internalCluster().startNode(); + ensureStableCluster(2, node1); + ensureStableCluster(2, node2); + + try { + client().admin().cluster().preparePutPipeline("_id", pipelineSource).get(); + fail("exception expected"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("Processor type [test] is not installed on node")); + } + } + + public void testFailPipelineCreationProcessorNotInstalledOnMasterNode() throws Exception { + internalCluster().startNode(); + installPlugin = true; + internalCluster().startNode(); + + try { + client().admin().cluster().preparePutPipeline("_id", pipelineSource).get(); + fail("exception expected"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), equalTo("No processor type exists with name [test]")); + } + } + + // If there is a pipeline defined and a node joins that doesn't have the processor installed then + // that pipeline can't be used on this node.
+ public void testFailStartNode() throws Exception { + installPlugin = true; + String node1 = internalCluster().startNode(); + + WritePipelineResponse response = client().admin().cluster().preparePutPipeline("_id", pipelineSource).get(); + assertThat(response.isAcknowledged(), is(true)); + Pipeline pipeline = internalCluster().getInstance(NodeService.class, node1).getIngestService().getPipelineStore().get("_id"); + assertThat(pipeline, notNullValue()); + + installPlugin = false; + String node2 = internalCluster().startNode(); + pipeline = internalCluster().getInstance(NodeService.class, node2).getIngestService().getPipelineStore().get("_id"); + assertThat(pipeline, nullValue()); + } + +} diff --git a/core/src/test/java/org/elasticsearch/ingest/IngestStatsTests.java b/core/src/test/java/org/elasticsearch/ingest/IngestStatsTests.java new file mode 100644 index 00000000000..e7064b7e449 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/ingest/IngestStatsTests.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.ingest; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +public class IngestStatsTests extends ESTestCase { + + public void testSerialization() throws IOException { + IngestStats.Stats total = new IngestStats.Stats(5, 10, 20, 30); + IngestStats.Stats foo = new IngestStats.Stats(50, 100, 200, 300); + IngestStats ingestStats = new IngestStats(total, Collections.singletonMap("foo", foo)); + IngestStats serialize = serialize(ingestStats); + assertNotSame(serialize, ingestStats); + assertNotSame(serialize.getTotalStats(), total); + assertEquals(total.getIngestCount(), serialize.getTotalStats().getIngestCount()); + assertEquals(total.getIngestFailedCount(), serialize.getTotalStats().getIngestFailedCount()); + assertEquals(total.getIngestTimeInMillis(), serialize.getTotalStats().getIngestTimeInMillis()); + assertEquals(total.getIngestCurrent(), serialize.getTotalStats().getIngestCurrent()); + + assertEquals(ingestStats.getStatsPerPipeline().size(), 1); + assertTrue(ingestStats.getStatsPerPipeline().containsKey("foo")); + + Map<String, IngestStats.Stats> left = ingestStats.getStatsPerPipeline(); + Map<String, IngestStats.Stats> right = serialize.getStatsPerPipeline(); + + assertEquals(right.size(), 1); + assertTrue(right.containsKey("foo")); + assertEquals(left.size(), 1); + assertTrue(left.containsKey("foo")); + IngestStats.Stats leftStats = left.get("foo"); + IngestStats.Stats rightStats = right.get("foo"); + assertEquals(leftStats.getIngestCount(), rightStats.getIngestCount()); + assertEquals(leftStats.getIngestFailedCount(), rightStats.getIngestFailedCount()); + assertEquals(leftStats.getIngestTimeInMillis(), rightStats.getIngestTimeInMillis()); + assertEquals(leftStats.getIngestCurrent(), rightStats.getIngestCurrent()); + } + + private <T extends Writeable<T>> T serialize(Writeable<T> writeable) throws IOException { + BytesStreamOutput out = new BytesStreamOutput(); + writeable.writeTo(out); + StreamInput in = StreamInput.wrap(out.bytes()); + return writeable.readFrom(in); + } +} diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java index e644df2a83a..b84ba928be4 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.ingest.core.CompoundProcessor; import org.elasticsearch.ingest.core.IngestDocument; @@ -38,15 +39,16 @@ import org.mockito.ArgumentMatcher; import org.mockito.invocation.InvocationOnMock; import java.util.Collections; +import java.util.HashMap; import java.util.Map; import java.util.Objects; import java.util.function.BiConsumer; import java.util.function.Consumer; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.eq; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; import static 
org.mockito.Matchers.anyString; import static org.mockito.Matchers.argThat; import static org.mockito.Mockito.doAnswer; @@ -341,6 +343,43 @@ public class PipelineExecutionServiceTests extends ESTestCase { verify(completionHandler, times(1)).accept(null); } + public void testStats() throws Exception { + IngestStats ingestStats = executionService.stats(); + assertThat(ingestStats.getStatsPerPipeline().size(), equalTo(0)); + assertThat(ingestStats.getTotalStats().getIngestCount(), equalTo(0L)); + assertThat(ingestStats.getTotalStats().getIngestCurrent(), equalTo(0L)); + assertThat(ingestStats.getTotalStats().getIngestFailedCount(), equalTo(0L)); + assertThat(ingestStats.getTotalStats().getIngestTimeInMillis(), equalTo(0L)); + + when(store.get("_id1")).thenReturn(new Pipeline("_id1", null, new CompoundProcessor())); + when(store.get("_id2")).thenReturn(new Pipeline("_id2", null, new CompoundProcessor())); + + Map<String, PipelineConfiguration> configurationMap = new HashMap<>(); + configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"))); + configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"))); + executionService.updatePipelineStats(new IngestMetadata(configurationMap)); + + Consumer<Throwable> failureHandler = mock(Consumer.class); + Consumer<Boolean> completionHandler = mock(Consumer.class); + + IndexRequest indexRequest = new IndexRequest("_index"); + indexRequest.setPipeline("_id1"); + executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); + ingestStats = executionService.stats(); + assertThat(ingestStats.getStatsPerPipeline().size(), equalTo(2)); + assertThat(ingestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); + assertThat(ingestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(0L)); + assertThat(ingestStats.getTotalStats().getIngestCount(), equalTo(1L)); + + indexRequest.setPipeline("_id2"); + executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); + ingestStats = executionService.stats(); + assertThat(ingestStats.getStatsPerPipeline().size(), equalTo(2)); + assertThat(ingestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); + assertThat(ingestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(1L)); + assertThat(ingestStats.getTotalStats().getIngestCount(), equalTo(2L)); + } + private IngestDocument eqID(String index, String type, String id, Map<String, Object> source) { return argThat(new IngestDocumentMatcher(index, type, id, source)); } diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java index fb0605f90b5..4009e4877b9 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java @@ -21,24 +21,32 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.Version; import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.LocalTransportAddress; +import
org.elasticsearch.ingest.core.IngestInfo; import org.elasticsearch.ingest.core.Pipeline; +import org.elasticsearch.ingest.core.ProcessorInfo; +import org.elasticsearch.ingest.processor.RemoveProcessor; import org.elasticsearch.ingest.processor.SetProcessor; import org.elasticsearch.test.ESTestCase; import org.junit.Before; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -52,6 +60,7 @@ public class PipelineStoreTests extends ESTestCase { store = new PipelineStore(Settings.EMPTY); ProcessorsRegistry.Builder registryBuilder = new ProcessorsRegistry.Builder(); registryBuilder.registerProcessor("set", (templateService, registry) -> new SetProcessor.Factory(TestTemplateService.instance())); + registryBuilder.registerProcessor("remove", (templateService, registry) -> new RemoveProcessor.Factory(TestTemplateService.instance())); store.buildProcessorFactoryRegistry(registryBuilder, null); } @@ -197,4 +206,38 @@ public class PipelineStoreTests extends ESTestCase { assertThat(pipeline, nullValue()); } + public void testValidate() throws Exception { + PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}},{\"remove\" : {\"field\": \"_field\"}}]}")); + + DiscoveryNode node1 = new DiscoveryNode("_node_id1", new LocalTransportAddress("_id"), Version.CURRENT); + DiscoveryNode node2 = new DiscoveryNode("_node_id2", new LocalTransportAddress("_id"), Version.CURRENT); + Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>(); + ingestInfos.put(node1, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); + ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set")))); + + try { + store.validatePipeline(ingestInfos, putRequest); + fail("exception expected"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("Processor type [remove] is not installed on node [{_node_id2}{local}{local[_id]}]")); + } + + ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); + store.validatePipeline(ingestInfos, putRequest); + } + + public void testValidateNoIngestInfo() throws Exception { + PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}")); + try { + store.validatePipeline(Collections.emptyMap(), putRequest); + fail("exception expected"); + } catch (IllegalStateException e) { + assertThat(e.getMessage(), equalTo("Ingest info is empty")); + } + + DiscoveryNode discoveryNode = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), Version.CURRENT); + IngestInfo ingestInfo = new IngestInfo(Collections.singletonList(new ProcessorInfo("set"))); + store.validatePipeline(Collections.singletonMap(discoveryNode, ingestInfo), putRequest); + } + } diff --git a/core/src/test/java/org/elasticsearch/ingest/core/PipelineFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/core/PipelineFactoryTests.java index fdf48ff4281..537d8f020e6 100644 --- a/core/src/test/java/org/elasticsearch/ingest/core/PipelineFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/core/PipelineFactoryTests.java @@
-23,11 +23,14 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.ProcessorsRegistry; import org.elasticsearch.ingest.TestProcessor; import org.elasticsearch.ingest.TestTemplateService; +import org.elasticsearch.ingest.processor.FailProcessor; +import org.elasticsearch.ingest.processor.SetProcessor; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.prefs.PreferencesFactory; @@ -115,6 +118,15 @@ public class PipelineFactoryTests extends ESTestCase { assertThat(pipeline.getProcessors().get(0).getType(), equalTo("compound")); } + public void testFlattenProcessors() throws Exception { + TestProcessor testProcessor = new TestProcessor(ingestDocument -> {}); + CompoundProcessor processor1 = new CompoundProcessor(testProcessor, testProcessor); + CompoundProcessor processor2 = new CompoundProcessor(Collections.singletonList(testProcessor), Collections.singletonList(testProcessor)); + Pipeline pipeline = new Pipeline("_id", "_description", new CompoundProcessor(processor1, processor2)); + List<Processor> flattened = pipeline.flattenAllProcessors(); + assertThat(flattened.size(), equalTo(4)); + } + private ProcessorsRegistry createProcessorRegistry(Map processorRegistry) { ProcessorsRegistry.Builder builder = new ProcessorsRegistry.Builder(); for (Map.Entry entry : processorRegistry.entrySet()) { diff --git a/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java b/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java index 95439ebdc26..c979b2f4013 100644 --- a/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java +++ b/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java @@ -19,9 +19,13 @@ package org.elasticsearch.node.internal; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; + +import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.cli.CliToolTestCase; -import org.elasticsearch.common.cli.Terminal; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.env.Environment; @@ -29,17 +33,9 @@ import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.Before; -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.List; - import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; public class InternalSettingsPreparerTests extends ESTestCase { @@ -81,17 +77,9 @@ public class InternalSettingsPreparerTests extends ESTestCase { } public void testReplacePromptPlaceholders() { - final Terminal terminal = new CliToolTestCase.MockTerminal() { - @Override - public char[] readSecret(String message) { - return "replaced".toCharArray(); - } - - @Override - public String readText(String message) { - return "text"; - } - }; + MockTerminal terminal = new MockTerminal(); + terminal.addTextInput("text"); + terminal.addSecretInput("replaced"); Settings.Builder builder = settingsBuilder() .put(baseEnvSettings) diff --git
a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index 693ba4a2eba..bb56d139111 100644 --- a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.http.HttpInfo; +import org.elasticsearch.ingest.core.IngestInfo; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.os.DummyOsInfo; import org.elasticsearch.monitor.os.OsInfo; @@ -46,6 +47,7 @@ import org.elasticsearch.transport.TransportInfo; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -90,6 +92,7 @@ public class NodeInfoStreamingTests extends ESTestCase { compareJsonOutput(nodeInfo.getNode(), readNodeInfo.getNode()); compareJsonOutput(nodeInfo.getOs(), readNodeInfo.getOs()); comparePluginsAndModules(nodeInfo, readNodeInfo); + compareJsonOutput(nodeInfo.getIngest(), readNodeInfo.getIngest()); } private void comparePluginsAndModules(NodeInfo nodeInfo, NodeInfo readNodeInfo) throws IOException { @@ -135,6 +138,7 @@ public class NodeInfoStreamingTests extends ESTestCase { PluginsAndModules plugins = new PluginsAndModules(); plugins.addModule(DummyPluginInfo.INSTANCE); plugins.addPlugin(DummyPluginInfo.INSTANCE); - return new NodeInfo(VersionUtils.randomVersion(random()), build, node, serviceAttributes, settings, osInfo, process, jvm, threadPoolInfo, transport, htttpInfo, plugins); + IngestInfo ingestInfo = new IngestInfo(Collections.emptyList()); + return new NodeInfo(VersionUtils.randomVersion(random()), build, node, serviceAttributes, settings, osInfo, process, jvm, threadPoolInfo, transport, htttpInfo, plugins, ingestInfo); } } diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java index e3777e84f9a..03b7a258f4f 100644 --- a/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java +++ b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java @@ -21,7 +21,7 @@ package org.elasticsearch.nodesinfo; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.test.ESIntegTestCase; @@ -47,11 +47,11 @@ public class SimpleNodesInfoIT extends ESIntegTestCase { final String node_2 = nodesIds.get(1); ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get(); - logger.info("--> done cluster_health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); String server1NodeId = internalCluster().getInstance(ClusterService.class, node_1).state().nodes().localNodeId(); String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().localNodeId(); - logger.info("--> started nodes: " 
+ server1NodeId + " and " + server2NodeId); + logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId); NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet(); assertThat(response.getNodes().length, is(2)); @@ -91,11 +91,11 @@ public class SimpleNodesInfoIT extends ESIntegTestCase { final String node_2 = nodesIds.get(1); ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get(); - logger.info("--> done cluster_health, status " + clusterHealth.getStatus()); + logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); String server1NodeId = internalCluster().getInstance(ClusterService.class, node_1).state().nodes().localNodeId(); String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().localNodeId(); - logger.info("--> started nodes: " + server1NodeId + " and " + server2NodeId); + logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId); NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorIT.java deleted file mode 100644 index e99cf51758b..00000000000 --- a/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorIT.java +++ /dev/null @@ -1,395 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.percolator; - -import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.percolate.PercolateResponse; -import org.elasticsearch.action.percolate.PercolateSourceBuilder; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.test.ESIntegTestCase; - -import java.util.Random; -import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.Semaphore; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import static org.elasticsearch.index.query.QueryBuilders.boolQuery; -import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.percolator.PercolatorTestUtil.convertFromTextArray; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.arrayContainingInAnyOrder; -import static org.hamcrest.Matchers.arrayWithSize; -import static org.hamcrest.Matchers.emptyArray; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.nullValue; - - -/** - * - */ -public class ConcurrentPercolatorIT extends ESIntegTestCase { - public void testSimpleConcurrentPercolator() throws Exception { - // We need to index a document / define mapping, otherwise field1 doesn't get recognized as number field. - // If we don't do this, then 'test2' percolate query gets parsed as a TermQuery and not a RangeQuery. - // The percolate api doesn't parse the doc if no queries have registered, so it can't lazily create a mapping - assertAcked(prepareCreate("index").addMapping("type", "field1", "type=long", "field2", "type=text")); // random # shards better has a mapping! 
- ensureGreen(); - - final BytesReference onlyField1 = XContentFactory.jsonBuilder().startObject().startObject("doc") - .field("field1", 1) - .endObject().endObject().bytes(); - final BytesReference onlyField2 = XContentFactory.jsonBuilder().startObject().startObject("doc") - .field("field2", "value") - .endObject().endObject().bytes(); - final BytesReference bothFields = XContentFactory.jsonBuilder().startObject().startObject("doc") - .field("field1", 1) - .field("field2", "value") - .endObject().endObject().bytes(); - - client().prepareIndex("index", "type", "1").setSource(XContentFactory.jsonBuilder().startObject() - .field("field1", 1) - .field("field2", "value") - .endObject()).execute().actionGet(); - - client().prepareIndex("index", PercolatorService.TYPE_NAME, "test1") - .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field2", "value")).endObject()) - .execute().actionGet(); - client().prepareIndex("index", PercolatorService.TYPE_NAME, "test2") - .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field1", 1)).endObject()) - .execute().actionGet(); - refresh(); // make sure it's refreshed - - final CountDownLatch start = new CountDownLatch(1); - final AtomicBoolean stop = new AtomicBoolean(false); - final AtomicInteger counts = new AtomicInteger(0); - final AtomicReference<Throwable> exceptionHolder = new AtomicReference<>(); - Thread[] threads = new Thread[scaledRandomIntBetween(2, 5)]; - final int numberOfPercolations = scaledRandomIntBetween(1000, 10000); - - for (int i = 0; i < threads.length; i++) { - Runnable r = new Runnable() { - @Override - public void run() { - try { - start.await(); - while (!stop.get()) { - int count = counts.incrementAndGet(); - if ((count > numberOfPercolations)) { - stop.set(true); - } - PercolateResponse percolate; - if (count % 3 == 0) { - percolate = client().preparePercolate().setIndices("index").setDocumentType("type") - .setSource(bothFields) - .execute().actionGet(); - assertThat(percolate.getMatches(), arrayWithSize(2)); - assertThat(convertFromTextArray(percolate.getMatches(), "index"), arrayContainingInAnyOrder("test1", "test2")); - } else if (count % 3 == 1) { - percolate = client().preparePercolate().setIndices("index").setDocumentType("type") - .setSource(onlyField2) - .execute().actionGet(); - assertThat(percolate.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(percolate.getMatches(), "index"), arrayContaining("test1")); - } else { - percolate = client().preparePercolate().setIndices("index").setDocumentType("type") - .setSource(onlyField1) - .execute().actionGet(); - assertThat(percolate.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(percolate.getMatches(), "index"), arrayContaining("test2")); - } - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (Throwable e) { - exceptionHolder.set(e); - Thread.currentThread().interrupt(); - } - } - }; - threads[i] = new Thread(r); - threads[i].start(); - } - - start.countDown(); - for (Thread thread : threads) { - thread.join(); - } - - Throwable assertionError = exceptionHolder.get(); - if (assertionError != null) { - assertionError.printStackTrace(); - } - assertThat(assertionError + " should be null", assertionError, nullValue()); - } - - public void testConcurrentAddingAndPercolating() throws Exception { - assertAcked(prepareCreate("index").addMapping("type", "field1", "type=text", "field2", "type=text")); - ensureGreen(); - final int numIndexThreads =
scaledRandomIntBetween(1, 3); - final int numPercolateThreads = scaledRandomIntBetween(2, 6); - final int numPercolatorOperationsPerThread = scaledRandomIntBetween(100, 1000); - - final Set<Throwable> exceptionsHolder = ConcurrentCollections.newConcurrentSet(); - final CountDownLatch start = new CountDownLatch(1); - final AtomicInteger runningPercolateThreads = new AtomicInteger(numPercolateThreads); - final AtomicInteger type1 = new AtomicInteger(); - final AtomicInteger type2 = new AtomicInteger(); - final AtomicInteger type3 = new AtomicInteger(); - - final AtomicInteger idGen = new AtomicInteger(); - - Thread[] indexThreads = new Thread[numIndexThreads]; - for (int i = 0; i < numIndexThreads; i++) { - final Random rand = new Random(getRandom().nextLong()); - Runnable r = new Runnable() { - @Override - public void run() { - try { - XContentBuilder onlyField1 = XContentFactory.jsonBuilder().startObject() - .field("query", termQuery("field1", "value")).endObject(); - XContentBuilder onlyField2 = XContentFactory.jsonBuilder().startObject() - .field("query", termQuery("field2", "value")).endObject(); - XContentBuilder field1And2 = XContentFactory.jsonBuilder().startObject() - .field("query", boolQuery().must(termQuery("field1", "value")).must(termQuery("field2", "value"))).endObject(); - - start.await(); - while (runningPercolateThreads.get() > 0) { - Thread.sleep(100); - int x = rand.nextInt(3); - String id = Integer.toString(idGen.incrementAndGet()); - IndexResponse response; - switch (x) { - case 0: - response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id) - .setSource(onlyField1) - .setRefresh(true) - .execute().actionGet(); - type1.incrementAndGet(); - break; - case 1: - response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id) - .setSource(onlyField2) - .setRefresh(true) - .execute().actionGet(); - type2.incrementAndGet(); - break; - case 2: - response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id) - .setSource(field1And2) - .setRefresh(true) - .execute().actionGet(); - type3.incrementAndGet(); - break; - default: - throw new IllegalStateException("Illegal x=" + x); - } - assertThat(response.getId(), equalTo(id)); - assertThat(response.getVersion(), equalTo(1L)); - } - } catch (Throwable t) { - exceptionsHolder.add(t); - logger.error("Error in indexing thread...", t); - } - } - }; - indexThreads[i] = new Thread(r); - indexThreads[i].start(); - } - - Thread[] percolateThreads = new Thread[numPercolateThreads]; - for (int i = 0; i < numPercolateThreads; i++) { - final Random rand = new Random(getRandom().nextLong()); - Runnable r = new Runnable() { - @Override - public void run() { - try { - XContentBuilder onlyField1Doc = XContentFactory.jsonBuilder().startObject().startObject("doc") - .field("field1", "value") - .endObject().endObject(); - XContentBuilder onlyField2Doc = XContentFactory.jsonBuilder().startObject().startObject("doc") - .field("field2", "value") - .endObject().endObject(); - XContentBuilder field1AndField2Doc = XContentFactory.jsonBuilder().startObject().startObject("doc") - .field("field1", "value") - .field("field2", "value") - .endObject().endObject(); - start.await(); - for (int counter = 0; counter < numPercolatorOperationsPerThread; counter++) { - int x = rand.nextInt(3); - int atLeastExpected; - PercolateResponse response; - switch (x) { - case 0: - atLeastExpected = type1.get(); - response = client().preparePercolate().setIndices("index").setDocumentType("type") - .setSource(onlyField1Doc).execute().actionGet(); -
assertNoFailures(response); - assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards())); - assertThat(response.getCount(), greaterThanOrEqualTo((long) atLeastExpected)); - break; - case 1: - atLeastExpected = type2.get(); - response = client().preparePercolate().setIndices("index").setDocumentType("type") - .setSource(onlyField2Doc).execute().actionGet(); - assertNoFailures(response); - assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards())); - assertThat(response.getCount(), greaterThanOrEqualTo((long) atLeastExpected)); - break; - case 2: - atLeastExpected = type3.get(); - response = client().preparePercolate().setIndices("index").setDocumentType("type") - .setSource(field1AndField2Doc).execute().actionGet(); - assertNoFailures(response); - assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards())); - assertThat(response.getCount(), greaterThanOrEqualTo((long) atLeastExpected)); - break; - } - } - } catch (Throwable t) { - exceptionsHolder.add(t); - logger.error("Error in percolate thread...", t); - } finally { - runningPercolateThreads.decrementAndGet(); - } - } - }; - percolateThreads[i] = new Thread(r); - percolateThreads[i].start(); - } - - start.countDown(); - for (Thread thread : indexThreads) { - thread.join(); - } - for (Thread thread : percolateThreads) { - thread.join(); - } - - for (Throwable t : exceptionsHolder) { - logger.error("Unexpected exception {}", t.getMessage(), t); - } - assertThat(exceptionsHolder.isEmpty(), equalTo(true)); - } - - public void testConcurrentAddingAndRemovingWhilePercolating() throws Exception { - assertAcked(prepareCreate("index").addMapping("type", "field1", "type=text")); - ensureGreen(); - final int numIndexThreads = scaledRandomIntBetween(1, 3); - final int numberPercolateOperation = scaledRandomIntBetween(10, 100); - - final AtomicReference<Throwable> exceptionHolder = new AtomicReference<>(null); - final AtomicInteger idGen = new AtomicInteger(0); - final Set<String> liveIds = ConcurrentCollections.newConcurrentSet(); - final AtomicBoolean run = new AtomicBoolean(true); - Thread[] indexThreads = new Thread[numIndexThreads]; - final Semaphore semaphore = new Semaphore(numIndexThreads, true); - for (int i = 0; i < indexThreads.length; i++) { - final Random rand = new Random(getRandom().nextLong()); - Runnable r = new Runnable() { - @Override - public void run() { - try { - XContentBuilder doc = XContentFactory.jsonBuilder().startObject() - .field("query", termQuery("field1", "value")).endObject(); - outer: - while (run.get()) { - semaphore.acquire(); - try { - if (!liveIds.isEmpty() && rand.nextInt(100) < 19) { - String id; - do { - if (liveIds.isEmpty()) { - continue outer; - } - id = Integer.toString(randomInt(idGen.get())); - } while (!liveIds.remove(id)); - - DeleteResponse response = client().prepareDelete("index", PercolatorService.TYPE_NAME, id) - .setRefresh(true) - .execute().actionGet(); - assertThat(response.getId(), equalTo(id)); - assertThat("doc[" + id + "] should have been deleted, but isn't", response.isFound(), equalTo(true)); - } else { - String id = Integer.toString(idGen.getAndIncrement()); - IndexResponse response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id) - .setSource(doc) - .setRefresh(true) - .execute().actionGet(); - liveIds.add(id); - assertThat(response.isCreated(), equalTo(true)); // We only add new docs - assertThat(response.getId(), equalTo(id)); - } - } finally { - semaphore.release(); - } - } - } catch (InterruptedException iex) { -
logger.error("indexing thread was interrupted...", iex); - run.set(false); - } catch (Throwable t) { - run.set(false); - exceptionHolder.set(t); - logger.error("Error in indexing thread...", t); - } - } - }; - indexThreads[i] = new Thread(r); - indexThreads[i].start(); - } - - String percolateDoc = XContentFactory.jsonBuilder().startObject() - .field("field1", "value") - .endObject().string(); - for (int counter = 0; counter < numberPercolateOperation; counter++) { - Thread.sleep(5); - semaphore.acquire(numIndexThreads); - try { - if (!run.get()) { - break; - } - int atLeastExpected = liveIds.size(); - PercolateResponse response = client().preparePercolate().setIndices("index").setDocumentType("type") - .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(percolateDoc)) - .setSize(atLeastExpected) - .get(); - assertThat(response.getShardFailures(), emptyArray()); - assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards())); - assertThat(response.getMatches().length, equalTo(atLeastExpected)); - } finally { - semaphore.release(numIndexThreads); - } - } - run.set(false); - for (Thread thread : indexThreads) { - thread.join(); - } - assertThat("exceptionHolder should have been empty, but holds: " + exceptionHolder.toString(), exceptionHolder.get(), nullValue()); - } - -} diff --git a/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java index 6dfcc5a878a..f51180f5a5a 100644 --- a/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java +++ b/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java @@ -24,8 +24,10 @@ import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder; import org.elasticsearch.action.percolate.MultiPercolateResponse; import org.elasticsearch.action.percolate.PercolateSourceBuilder; import org.elasticsearch.client.Requests; +import org.elasticsearch.common.compress.NotXContentException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; @@ -49,6 +51,7 @@ import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -60,19 +63,19 @@ public class MultiPercolatorIT extends ESIntegTestCase { ensureGreen(); logger.info("--> register a queries"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "2") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "3") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3") .setSource(jsonBuilder().startObject().field("query", 
boolQuery() .must(matchQuery("field1", "b")) .must(matchQuery("field1", "c")) ).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); refresh(); @@ -122,7 +125,7 @@ public class MultiPercolatorIT extends ESIntegTestCase { item = response.getItems()[4]; assertThat(item.getResponse(), nullValue()); assertThat(item.getErrorMessage(), notNullValue()); - assertThat(item.getErrorMessage(), containsString("document missing")); + assertThat(item.getErrorMessage(), containsString("[test/type/5] doesn't exist")); } public void testWithRouting() throws Exception { @@ -130,22 +133,22 @@ public class MultiPercolatorIT extends ESIntegTestCase { ensureGreen(); logger.info("--> register a queries"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setRouting("a") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "2") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2") .setRouting("a") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "3") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3") .setRouting("a") .setSource(jsonBuilder().startObject().field("query", boolQuery() .must(matchQuery("field1", "b")) .must(matchQuery("field1", "c")) ).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4") .setRouting("a") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); @@ -201,7 +204,7 @@ public class MultiPercolatorIT extends ESIntegTestCase { item = response.getItems()[4]; assertThat(item.getResponse(), nullValue()); assertThat(item.getErrorMessage(), notNullValue()); - assertThat(item.getErrorMessage(), containsString("document missing")); + assertThat(item.getErrorMessage(), containsString("[test/type/5] doesn't exist")); } public void testExistingDocsOnly() throws Exception { @@ -210,7 +213,7 @@ public class MultiPercolatorIT extends ESIntegTestCase { int numQueries = randomIntBetween(50, 100); logger.info("--> register a queries"); for (int i = 0; i < numQueries; i++) { - client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, Integer.toString(i)) .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); } @@ -253,7 +256,7 @@ public class MultiPercolatorIT extends ESIntegTestCase { assertThat(response.items().length, equalTo(numPercolateRequest)); for (MultiPercolateResponse.Item item : response) { assertThat(item.isFailure(), equalTo(true)); - assertThat(item.getErrorMessage(), containsString("document missing")); + assertThat(item.getErrorMessage(), containsString("doesn't exist")); assertThat(item.getResponse(), nullValue()); } @@ -283,12 +286,10 @@ public class MultiPercolatorIT extends ESIntegTestCase { createIndex("test"); ensureGreen(); - NumShards test = getNumShards("test"); - int 
numQueries = randomIntBetween(50, 100); logger.info("--> register a queries"); for (int i = 0; i < numQueries; i++) { - client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, Integer.toString(i)) .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); } @@ -324,13 +325,8 @@ public class MultiPercolatorIT extends ESIntegTestCase { response = builder.execute().actionGet(); assertThat(response.items().length, equalTo(numPercolateRequest)); for (MultiPercolateResponse.Item item : response) { - assertThat(item.isFailure(), equalTo(false)); - assertThat(item.getResponse().getSuccessfulShards(), equalTo(0)); - assertThat(item.getResponse().getShardFailures().length, equalTo(test.numPrimaries)); - for (ShardOperationFailedException shardFailure : item.getResponse().getShardFailures()) { - assertThat(shardFailure.reason(), containsString("Failed to derive xcontent")); - assertThat(shardFailure.status().getStatus(), equalTo(400)); - } + assertThat(item.isFailure(), equalTo(true)); + assertThat(item.getFailure(), notNullValue()); } // one valid request @@ -402,7 +398,7 @@ public class MultiPercolatorIT extends ESIntegTestCase { assertAcked(client().admin().indices().prepareCreate("nestedindex").addMapping("company", mapping)); ensureGreen("nestedindex"); - client().prepareIndex("nestedindex", PercolatorService.TYPE_NAME, "Q").setSource(jsonBuilder().startObject() + client().prepareIndex("nestedindex", PercolatorFieldMapper.TYPE_NAME, "Q").setSource(jsonBuilder().startObject() .field("query", QueryBuilders.nestedQuery("employee", QueryBuilders.matchQuery("employee.name", "virginia potts").operator(Operator.AND)).scoreMode(ScoreMode.Avg)).endObject()).get(); refresh(); diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolateDocumentParserTests.java b/core/src/test/java/org/elasticsearch/percolator/PercolateDocumentParserTests.java deleted file mode 100644 index 854a25358e7..00000000000 --- a/core/src/test/java/org/elasticsearch/percolator/PercolateDocumentParserTests.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.percolator; - -import org.apache.lucene.index.Term; -import org.apache.lucene.search.TermQuery; -import org.elasticsearch.Version; -import org.elasticsearch.action.percolate.PercolateShardRequest; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.analysis.AnalysisService; -import org.elasticsearch.index.analysis.AnalyzerProvider; -import org.elasticsearch.index.analysis.CharFilterFactory; -import org.elasticsearch.index.analysis.TokenFilterFactory; -import org.elasticsearch.index.analysis.TokenizerFactory; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.query.QueryParser; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.query.TermQueryParser; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.indices.IndicesModule; -import org.elasticsearch.indices.query.IndicesQueriesRegistry; -import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.aggregations.AggregationBinaryParseElement; -import org.elasticsearch.search.aggregations.AggregationParseElement; -import org.elasticsearch.search.aggregations.AggregationPhase; -import org.elasticsearch.search.aggregations.AggregatorParsers; -import org.elasticsearch.search.highlight.HighlightPhase; -import org.elasticsearch.search.highlight.Highlighters; -import org.elasticsearch.search.sort.SortParseElement; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; -import org.mockito.Mockito; - -import java.util.Collections; -import java.util.Map; - -import static java.util.Collections.singletonMap; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; - -public class PercolateDocumentParserTests extends ESTestCase { - - private MapperService mapperService; - private PercolateDocumentParser parser; - private QueryShardContext queryShardContext; - private PercolateShardRequest request; - - @Before - public void init() { - IndexSettings indexSettings = new IndexSettings(new IndexMetaData.Builder("_index").settings( - Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) - .build(), Settings.EMPTY); - AnalysisService analysisService = new AnalysisService(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()); - IndicesModule indicesModule = new IndicesModule(); - mapperService = new MapperService(indexSettings, analysisService, new SimilarityService(indexSettings, Collections.emptyMap()), indicesModule.getMapperRegistry(), () -> null); - - Map<String, QueryParser<?>> parsers = singletonMap("term", new TermQueryParser()); - IndicesQueriesRegistry indicesQueriesRegistry = new IndicesQueriesRegistry(indexSettings.getSettings(), parsers); - - queryShardContext = new QueryShardContext(indexSettings, null, null, mapperService, null, null, indicesQueriesRegistry); -
HighlightPhase highlightPhase = new HighlightPhase(Settings.EMPTY, new Highlighters()); - AggregatorParsers aggregatorParsers = new AggregatorParsers(Collections.emptySet(), Collections.emptySet(), - new NamedWriteableRegistry()); - AggregationPhase aggregationPhase = new AggregationPhase(new AggregationParseElement(aggregatorParsers, indicesQueriesRegistry), - new AggregationBinaryParseElement(aggregatorParsers, indicesQueriesRegistry)); - parser = new PercolateDocumentParser(highlightPhase, new SortParseElement(), aggregationPhase); - - request = Mockito.mock(PercolateShardRequest.class); - Mockito.when(request.shardId()).thenReturn(new ShardId("_index", "_na_", 0)); - Mockito.when(request.documentType()).thenReturn("type"); - } - - public void testParseDoc() throws Exception { - XContentBuilder source = jsonBuilder().startObject() - .startObject("doc") - .field("field1", "value1") - .endObject() - .endObject(); - Mockito.when(request.source()).thenReturn(source.bytes()); - - PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", new Index("_index", "_na_"), 0), mapperService, queryShardContext); - ParsedDocument parsedDocument = parser.parse(request, context, mapperService); - assertThat(parsedDocument.rootDoc().get("field1"), equalTo("value1")); - } - - public void testParseDocAndOtherOptions() throws Exception { - XContentBuilder source = jsonBuilder().startObject() - .startObject("doc") - .field("field1", "value1") - .endObject() - .startObject("query") - .startObject("term").field("field1", "value1").endObject() - .endObject() - .field("track_scores", true) - .field("size", 123) - .startObject("sort").startObject("_score").endObject().endObject() - .endObject(); - Mockito.when(request.source()).thenReturn(source.bytes()); - - PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", new Index("_index", "_na_"), 0), mapperService, queryShardContext); - ParsedDocument parsedDocument = parser.parse(request, context, mapperService); - assertThat(parsedDocument.rootDoc().get("field1"), equalTo("value1")); - assertThat(context.percolateQuery(), equalTo(new TermQuery(new Term("field1", "value1")))); - assertThat(context.trackScores(), is(true)); - assertThat(context.size(), is(123)); - assertThat(context.sort(), nullValue()); - } - - public void testParseDocSource() throws Exception { - XContentBuilder source = jsonBuilder().startObject() - .startObject("query") - .startObject("term").field("field1", "value1").endObject() - .endObject() - .field("track_scores", true) - .field("size", 123) - .startObject("sort").startObject("_score").endObject().endObject() - .endObject(); - XContentBuilder docSource = jsonBuilder().startObject() - .field("field1", "value1") - .endObject(); - Mockito.when(request.source()).thenReturn(source.bytes()); - Mockito.when(request.docSource()).thenReturn(docSource.bytes()); - - PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", new Index("_index", "_na_"), 0), mapperService, queryShardContext); - ParsedDocument parsedDocument = parser.parse(request, context, mapperService); - assertThat(parsedDocument.rootDoc().get("field1"), equalTo("value1")); - assertThat(context.percolateQuery(), equalTo(new TermQuery(new Term("field1", "value1")))); - assertThat(context.trackScores(), is(true)); - assertThat(context.size(), is(123)); - assertThat(context.sort(), nullValue()); - } - - public void testParseDocSourceAndSource() throws Exception { - XContentBuilder source = 
jsonBuilder().startObject() - .startObject("doc") - .field("field1", "value1") - .endObject() - .startObject("query") - .startObject("term").field("field1", "value1").endObject() - .endObject() - .field("track_scores", true) - .field("size", 123) - .startObject("sort").startObject("_score").endObject().endObject() - .endObject(); - XContentBuilder docSource = jsonBuilder().startObject() - .field("field1", "value1") - .endObject(); - Mockito.when(request.source()).thenReturn(source.bytes()); - Mockito.when(request.docSource()).thenReturn(docSource.bytes()); - - PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", new Index("_index", "_na_"), 0), mapperService, queryShardContext); - try { - parser.parse(request, context, mapperService); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("Can't specify the document to percolate in the source of the request and as document id")); - } - } - -} diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorAggregationsIT.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorAggregationsIT.java index f07e50b1dfc..8393c80786c 100644 --- a/core/src/test/java/org/elasticsearch/percolator/PercolatorAggregationsIT.java +++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorAggregationsIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.percolator; import org.elasticsearch.action.percolate.PercolateRequestBuilder; import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -69,7 +70,7 @@ public class PercolatorAggregationsIT extends ESIntegTestCase { String value = values[i % numUniqueQueries]; expectedCount[i % numUniqueQueries]++; QueryBuilder queryBuilder = matchQuery("field1", value); - client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, Integer.toString(i)) .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute() .actionGet(); } @@ -134,7 +135,7 @@ public class PercolatorAggregationsIT extends ESIntegTestCase { String value = values[i % numUniqueQueries]; expectedCount[i % numUniqueQueries]++; QueryBuilder queryBuilder = matchQuery("field1", value); - client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, Integer.toString(i)) .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute() .actionGet(); } @@ -212,7 +213,7 @@ public class PercolatorAggregationsIT extends ESIntegTestCase { for (int i = 0; i < numQueries; i++) { String value = "value0"; QueryBuilder queryBuilder = matchQuery("field1", value); - client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, Integer.toString(i)) .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", i % 3 == 0 ? 
"b" : "a").endObject()) .execute() .actionGet(); diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java index 01494aab72d..f6f5d260550 100644 --- a/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java +++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorIT.java @@ -20,11 +20,9 @@ package org.elasticsearch.percolator; import com.vividsolutions.jts.geom.Coordinate; import org.apache.lucene.search.join.ScoreMode; -import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.action.percolate.PercolateSourceBuilder; @@ -38,17 +36,15 @@ import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.engine.DocumentMissingException; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.functionscore.weight.WeightBuilder; import org.elasticsearch.index.query.support.QueryInnerHits; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.highlight.HighlightBuilder; -import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; @@ -83,7 +79,6 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; import static org.elasticsearch.percolator.PercolatorTestUtil.convertFromTextArray; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; @@ -94,10 +89,7 @@ import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; /** @@ -113,19 +105,19 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", "type", "1").setSource("field1", "value").execute().actionGet(); logger.info("--> register a queries"); - 
client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "2") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "3") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3") .setSource(jsonBuilder().startObject().field("query", boolQuery() .must(matchQuery("field1", "b")) .must(matchQuery("field1", "c")) ).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); refresh(); @@ -173,7 +165,8 @@ public class PercolatorIT extends ESIntegTestCase { .setGetRequest(Requests.getRequest("test").type("type").id("5")) .execute().actionGet(); fail("Exception should have been thrown"); - } catch (DocumentMissingException e) { + } catch (ResourceNotFoundException e) { + assertThat(e.getMessage(), equalTo("percolate document [test/type/5] doesn't exist")); } } @@ -194,7 +187,7 @@ public class PercolatorIT extends ESIntegTestCase { assertThat(response.getMatches(), emptyArray()); // add first query... - client().prepareIndex("test", PercolatorService.TYPE_NAME, "test1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "test1") .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field2", "value")).endObject()) .execute().actionGet(); refresh(); @@ -207,7 +200,7 @@ public class PercolatorIT extends ESIntegTestCase { assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("test1")); // add second query... 
- client().prepareIndex("test", PercolatorService.TYPE_NAME, "test2") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "test2") .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field1", 1)).endObject()) .execute().actionGet(); refresh(); @@ -221,7 +214,7 @@ public class PercolatorIT extends ESIntegTestCase { assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("test1", "test2")); - client().prepareDelete("test", PercolatorService.TYPE_NAME, "test2").execute().actionGet(); + client().prepareDelete("test", PercolatorFieldMapper.TYPE_NAME, "test2").execute().actionGet(); refresh(); response = client().preparePercolate() .setIndices("test").setDocumentType("type1") @@ -239,7 +232,7 @@ public class PercolatorIT extends ESIntegTestCase { logger.info("--> register a queries"); for (int i = 1; i <= 100; i++) { - client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, Integer.toString(i)) .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .setRouting(Integer.toString(i % 2)) .execute().actionGet(); @@ -282,7 +275,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("my-queries-index", "test", "1").setSource("field1", "value1").execute().actionGet(); logger.info("--> register a query"); - client().prepareIndex("my-queries-index", PercolatorService.TYPE_NAME, "kuku1") + client().prepareIndex("my-queries-index", PercolatorFieldMapper.TYPE_NAME, "kuku1") .setSource(jsonBuilder().startObject() .field("color", "blue") .field("query", termQuery("field1", "value1")) @@ -296,7 +289,7 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("my-queries-index", "test", "1").setSource("field1", "value1").execute().actionGet(); logger.info("--> register a query"); - client().prepareIndex("my-queries-index", PercolatorService.TYPE_NAME, "kuku2") + client().prepareIndex("my-queries-index", PercolatorFieldMapper.TYPE_NAME, "kuku2") .setSource(jsonBuilder().startObject() .field("color", "blue") .field("query", termQuery("field1", "value1")) @@ -323,7 +316,7 @@ public class PercolatorIT extends ESIntegTestCase { ensureGreen(); logger.info("--> register a query"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject() .field("source", "productizer") .field("query", QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("filingcategory:s"))) @@ -351,7 +344,7 @@ public class PercolatorIT extends ESIntegTestCase { ensureGreen(); logger.info("--> register a query"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "kuku") .setSource(jsonBuilder().startObject() .field("color", "blue") .field("query", termQuery("field1", "value1")) @@ -359,7 +352,7 @@ public class PercolatorIT extends ESIntegTestCase { .execute().actionGet(); refresh(); SearchResponse countResponse = client().prepareSearch().setSize(0) - .setQuery(matchAllQuery()).setTypes(PercolatorService.TYPE_NAME) + .setQuery(matchAllQuery()).setTypes(PercolatorFieldMapper.TYPE_NAME) .execute().actionGet(); assertThat(countResponse.getHits().totalHits(), equalTo(1L)); @@ -388,7 +381,7 @@ public class PercolatorIT extends ESIntegTestCase { client().admin().indices().prepareDelete("test").execute().actionGet(); 
logger.info("--> make sure percolated queries for it have been deleted as well"); countResponse = client().prepareSearch().setSize(0) - .setQuery(matchAllQuery()).setTypes(PercolatorService.TYPE_NAME) + .setQuery(matchAllQuery()).setTypes(PercolatorFieldMapper.TYPE_NAME) .execute().actionGet(); assertHitCount(countResponse, 0L); } @@ -398,7 +391,7 @@ public class PercolatorIT extends ESIntegTestCase { ensureGreen(); logger.info("--> register a query 1"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "kuku") .setSource(jsonBuilder().startObject() .field("color", "blue") .field("query", termQuery("field1", "value1")) @@ -407,7 +400,7 @@ public class PercolatorIT extends ESIntegTestCase { .execute().actionGet(); logger.info("--> register a query 2"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "bubu") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "bubu") .setSource(jsonBuilder().startObject() .field("color", "green") .field("query", termQuery("field1", "value2")) @@ -441,7 +434,7 @@ public class PercolatorIT extends ESIntegTestCase { ensureGreen(); logger.info("--> register a query 1"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "kuku") .setSource(jsonBuilder().startObject() .field("color", "blue") .field("query", termQuery("field1", "value1")) @@ -458,7 +451,7 @@ public class PercolatorIT extends ESIntegTestCase { assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("kuku")); logger.info("--> register a query 2"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "bubu") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "bubu") .setSource(jsonBuilder().startObject() .field("color", "green") .field("query", termQuery("field1", "value2")) @@ -475,7 +468,7 @@ public class PercolatorIT extends ESIntegTestCase { assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("bubu")); logger.info("--> register a query 3"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "susu") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "susu") .setSource(jsonBuilder().startObject() .field("color", "red") .field("query", termQuery("field1", "value2")) @@ -495,7 +488,7 @@ public class PercolatorIT extends ESIntegTestCase { assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("susu")); logger.info("--> deleting query 1"); - client().prepareDelete("test", PercolatorService.TYPE_NAME, "kuku").setRefresh(true).execute().actionGet(); + client().prepareDelete("test", PercolatorFieldMapper.TYPE_NAME, "kuku").setRefresh(true).execute().actionGet(); percolate = client().preparePercolate() .setIndices("test").setDocumentType("type1") @@ -512,7 +505,10 @@ public class PercolatorIT extends ESIntegTestCase { ensureGreen(); logger.info("--> register a query"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") + .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) + .execute().actionGet(); + client().prepareIndex("test2", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); refresh(); @@ -528,67 +524,7 @@ public class PercolatorIT extends ESIntegTestCase { NumShards numShards = 
getNumShards("test"); IndicesStatsResponse indicesResponse = client().admin().indices().prepareStats("test").execute().actionGet(); - assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo((long) numShards.numPrimaries)); - assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0L)); - assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo((long)numShards.dataCopies)); //number of copies - assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), equalTo(-1L)); - - NodesStatsResponse nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); - long percolateCount = 0; - for (NodeStats nodeStats : nodesResponse) { - percolateCount += nodeStats.getIndices().getPercolate().getCount(); - } - assertThat(percolateCount, equalTo((long) numShards.numPrimaries)); - - logger.info("--> Second percolate request"); - response = client().preparePercolate() - .setIndices("test").setDocumentType("type") - .setSource(jsonBuilder().startObject().startObject("doc").field("field", "val").endObject().endObject()) - .execute().actionGet(); - assertMatchCount(response, 1L); - assertThat(response.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("1")); - - indicesResponse = client().admin().indices().prepareStats().setPercolate(true).execute().actionGet(); - assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo((long) numShards.numPrimaries * 2)); - assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0L)); - assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo((long)numShards.dataCopies)); //number of copies - assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), equalTo(-1L)); - - percolateCount = 0; - nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); - for (NodeStats nodeStats : nodesResponse) { - percolateCount += nodeStats.getIndices().getPercolate().getCount(); - } - assertThat(percolateCount, equalTo((long) numShards.numPrimaries *2)); - - // We might be faster than 1 ms, so run upto 1000 times until have spend 1ms or more on percolating - boolean moreThanOneMs = false; - int counter = 3; // We already ran two times. 
- do { - indicesResponse = client().admin().indices().prepareStats("test").execute().actionGet(); - if (indicesResponse.getTotal().getPercolate().getTimeInMillis() > 0) { - moreThanOneMs = true; - break; - } - - logger.info("--> {}th percolate request", counter); - response = client().preparePercolate() - .setIndices("test").setDocumentType("type") - .setSource(jsonBuilder().startObject().startObject("doc").field("field", "val").endObject().endObject()) - .execute().actionGet(); - assertThat(response.getMatches(), arrayWithSize(1)); - assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("1")); - } while (++counter <= 1000); - assertTrue("Something is off, we should have spent at least 1ms on percolating...", moreThanOneMs); - - long percolateSumTime = 0; - nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); - for (NodeStats nodeStats : nodesResponse) { - percolateCount += nodeStats.getIndices().getPercolate().getCount(); - percolateSumTime += nodeStats.getIndices().getPercolate().getTimeInMillis(); - } - assertThat(percolateSumTime, greaterThan(0L)); + assertThat(indicesResponse.getTotal().getPercolatorCache().getNumQueries(), equalTo((long)numShards.dataCopies)); // number of copies } public void testPercolatingExistingDocs() throws Exception { @@ -602,19 +538,19 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", "type", "4").setSource("field1", "d").execute().actionGet(); logger.info("--> register a queries"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "2") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "3") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3") .setSource(jsonBuilder().startObject().field("query", boolQuery() .must(matchQuery("field1", "b")) .must(matchQuery("field1", "c")) ).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); refresh(); @@ -667,19 +603,19 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", "type", "4").setSource("field1", "d").setRouting("1").execute().actionGet(); logger.info("--> register a queries"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "2") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "3") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3") .setSource(jsonBuilder().startObject().field("query", 
boolQuery() .must(matchQuery("field1", "b")) .must(matchQuery("field1", "c")) ).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); refresh(); @@ -732,19 +668,19 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", "type", "4").setSource("field1", "d").execute().actionGet(); logger.info("--> registering queries"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "2") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "3") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3") .setSource(jsonBuilder().startObject().field("query", boolQuery() .must(matchQuery("field1", "b")) .must(matchQuery("field1", "c")) ).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); refresh(); @@ -788,7 +724,7 @@ public class PercolatorIT extends ESIntegTestCase { logger.info("--> registering queries"); for (int i = 1; i <= 10; i++) { String index = i % 2 == 0 ? 
"test1" : "test2"; - client().prepareIndex(index, PercolatorService.TYPE_NAME, Integer.toString(i)) + client().prepareIndex(index, PercolatorFieldMapper.TYPE_NAME, Integer.toString(i)) .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); } @@ -861,15 +797,15 @@ public class PercolatorIT extends ESIntegTestCase { public void testPercolateWithAliasFilter() throws Exception { assertAcked(prepareCreate("my-index") - .addMapping(PercolatorService.TYPE_NAME, "a", "type=keyword") + .addMapping(PercolatorFieldMapper.TYPE_NAME, "a", "type=keyword") .addAlias(new Alias("a").filter(QueryBuilders.termQuery("a", "a"))) .addAlias(new Alias("b").filter(QueryBuilders.termQuery("a", "b"))) .addAlias(new Alias("c").filter(QueryBuilders.termQuery("a", "c"))) ); - client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("my-index", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("a", "a").endObject()) .get(); - client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "2") + client().prepareIndex("my-index", PercolatorFieldMapper.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("a", "b").endObject()) .get(); refresh(); @@ -944,19 +880,19 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", "type", "1").setSource("field1", "value").execute().actionGet(); logger.info("--> register a queries"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "2") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "3") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3") .setSource(jsonBuilder().startObject().field("query", boolQuery() .must(matchQuery("field1", "b")) .must(matchQuery("field1", "c")) ).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); refresh(); @@ -1000,7 +936,8 @@ public class PercolatorIT extends ESIntegTestCase { .setGetRequest(Requests.getRequest("test").type("type").id("5")) .execute().actionGet(); fail("Exception should have been thrown"); - } catch (DocumentMissingException e) { + } catch (ResourceNotFoundException e) { + assertThat(e.getMessage(), equalTo("percolate document [test/type/5] doesn't exist")); } } @@ -1015,19 +952,19 @@ public class PercolatorIT extends ESIntegTestCase { client().prepareIndex("test", "type", "4").setSource("field1", "d").execute().actionGet(); logger.info("--> register a queries"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "2") + 
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "3") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3") .setSource(jsonBuilder().startObject().field("query", boolQuery() .must(matchQuery("field1", "b")) .must(matchQuery("field1", "c")) ).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); refresh(); @@ -1072,10 +1009,10 @@ public class PercolatorIT extends ESIntegTestCase { int numLevels = randomIntBetween(1, 25); long numQueriesPerLevel = randomIntBetween(10, 250); long totalQueries = numLevels * numQueriesPerLevel; - logger.info("--> register " + totalQueries + " queries"); + logger.info("--> register {} queries", totalQueries); for (int level = 1; level <= numLevels; level++) { for (int query = 1; query <= numQueriesPerLevel; query++) { - client().prepareIndex("my-index", PercolatorService.TYPE_NAME, level + "-" + query) + client().prepareIndex("my-index", PercolatorFieldMapper.TYPE_NAME, level + "-" + query) .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", level).endObject()) .execute().actionGet(); } @@ -1166,10 +1103,10 @@ public class PercolatorIT extends ESIntegTestCase { Map> controlMap = new HashMap<>(); long numQueries = randomIntBetween(100, 250); - logger.info("--> register " + numQueries + " queries"); + logger.info("--> register {} queries", numQueries); for (int i = 0; i < numQueries; i++) { int value = randomInt(10); - client().prepareIndex("my-index", PercolatorService.TYPE_NAME, Integer.toString(i)) + client().prepareIndex("my-index", PercolatorFieldMapper.TYPE_NAME, Integer.toString(i)) .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", i).field("field1", value).endObject()) .execute().actionGet(); if (!controlMap.containsKey(value)) { @@ -1250,10 +1187,10 @@ public class PercolatorIT extends ESIntegTestCase { createIndex("my-index"); ensureGreen(); - client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("my-index", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 1).endObject()) .execute().actionGet(); - client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "2") + client().prepareIndex("my-index", PercolatorFieldMapper.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 2).endObject()) .execute().actionGet(); refresh(); @@ -1271,33 +1208,6 @@ public class PercolatorIT extends ESIntegTestCase { assertThat(response.getMatches()[1].getScore(), equalTo(1f)); } - public void testPercolateSortingUnsupportedField() throws Exception { - client().admin().indices().prepareCreate("my-index") - .addMapping("my-type", "field", "type=text") - .addMapping(PercolatorService.TYPE_NAME, "level", "type=integer", "query", "type=object,enabled=false") - .get(); - ensureGreen(); - - client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 1).endObject()) - .get(); - client().prepareIndex("my-index", 
PercolatorService.TYPE_NAME, "2") - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 2).endObject()) - .get(); - refresh(); - - PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type") - .setSize(2) - .setPercolateDoc(docBuilder().setDoc("field", "value")) - .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("level"))) - .addSort(SortBuilders.fieldSort("level")) - .get(); - - assertThat(response.getShardFailures().length, equalTo(getNumShards("my-index").numPrimaries)); - assertThat(response.getShardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(response.getShardFailures()[0].reason(), containsString("Only _score desc is supported")); - } - public void testPercolateOnEmptyIndex() throws Exception { client().admin().indices().prepareCreate("my-index").execute().actionGet(); ensureGreen(); @@ -1306,7 +1216,7 @@ public class PercolatorIT extends ESIntegTestCase { .setSortByScore(true) .setSize(2) .setPercolateDoc(docBuilder().setDoc("field", "value")) - .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("level"))) + .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("level").missing(0.0))) .execute().actionGet(); assertMatchCount(response, 0L); } @@ -1322,19 +1232,19 @@ public class PercolatorIT extends ESIntegTestCase { assertAcked(prepareCreate("test").addMapping("type", "field1", fieldMapping.toString())); logger.info("--> register a queries"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "brown fox")).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "2") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "lazy dog")).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "3") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3") .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "jumps")).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "4") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "dog")).endObject()) .execute().actionGet(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "5") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "5") .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "fox")).endObject()) .execute().actionGet(); refresh(); @@ -1525,7 +1435,7 @@ public class PercolatorIT extends ESIntegTestCase { ensureGreen(); logger.info("--> register a query"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject() .field("query", QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() .must(QueryBuilders.queryStringQuery("root")) @@ -1579,10 +1489,10 @@ public class PercolatorIT extends ESIntegTestCase { client().admin().indices().prepareCreate("test").execute().actionGet(); ensureGreen(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, 
"1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .get(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .get(); refresh(); @@ -1602,7 +1512,7 @@ public class PercolatorIT extends ESIntegTestCase { ensureGreen(); try { - client().prepareIndex("test", PercolatorService.TYPE_NAME) + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME) .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "value")).endObject()) .get(); fail(); @@ -1611,7 +1521,7 @@ public class PercolatorIT extends ESIntegTestCase { } try { - client().prepareIndex("test", PercolatorService.TYPE_NAME) + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME) .setSource(jsonBuilder().startObject().field("query", rangeQuery("field1").from(0).to(1)).endObject()) .get(); fail(); @@ -1626,10 +1536,10 @@ public class PercolatorIT extends ESIntegTestCase { .get(); ensureGreen(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", rangeQuery("timestamp").from("now-1d").to("now")).endObject()) .get(); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "2") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2") .setSource(jsonBuilder().startObject().field("query", constantScoreQuery(rangeQuery("timestamp").from("now-1d").to("now"))).endObject()) .get(); refresh(); @@ -1654,7 +1564,7 @@ public class PercolatorIT extends ESIntegTestCase { assertAcked(client().admin().indices().prepareCreate("nestedindex").addMapping("company", mapping)); ensureGreen("nestedindex"); - client().prepareIndex("nestedindex", PercolatorService.TYPE_NAME, "Q").setSource(jsonBuilder().startObject() + client().prepareIndex("nestedindex", PercolatorFieldMapper.TYPE_NAME, "Q").setSource(jsonBuilder().startObject() .field("query", QueryBuilders.nestedQuery("employee", QueryBuilders.matchQuery("employee.name", "virginia potts").operator(Operator.AND)).scoreMode(ScoreMode.Avg)).endObject()).get(); refresh(); @@ -1783,12 +1693,12 @@ public class PercolatorIT extends ESIntegTestCase { "}"; assertAcked(client().admin().indices().prepareCreate("test").addMapping("doc", mapping)); ensureGreen("test"); - client().prepareIndex("test", PercolatorService.TYPE_NAME).setSource(q1).setId("q1").get(); - client().prepareIndex("test", PercolatorService.TYPE_NAME).setSource(q2).setId("q2").get(); - client().prepareIndex("test", PercolatorService.TYPE_NAME).setSource(q3).setId("q3").get(); - client().prepareIndex("test", PercolatorService.TYPE_NAME).setSource(q4).setId("q4").get(); - client().prepareIndex("test", PercolatorService.TYPE_NAME).setSource(q5).setId("q5").get(); - client().prepareIndex("test", PercolatorService.TYPE_NAME).setSource(q6).setId("q6").get(); + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME).setSource(q1).setId("q1").get(); + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME).setSource(q2).setId("q2").get(); + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME).setSource(q3).setId("q3").get(); + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME).setSource(q4).setId("q4").get(); + client().prepareIndex("test", 
PercolatorFieldMapper.TYPE_NAME).setSource(q5).setId("q5").get(); + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME).setSource(q6).setId("q6").get(); refresh(); PercolateResponse response = client().preparePercolate() .setIndices("test").setDocumentType("doc") @@ -1822,7 +1732,7 @@ public class PercolatorIT extends ESIntegTestCase { .put("index.percolator.map_unmapped_fields_as_string", true); assertAcked(prepareCreate("test") .setSettings(settings)); - client().prepareIndex("test", PercolatorService.TYPE_NAME) + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME) .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "value")).endObject()).get(); refresh(); logger.info("--> Percolate doc with field1=value"); @@ -1842,7 +1752,7 @@ public class PercolatorIT extends ESIntegTestCase { assertAcked(prepareCreate("test") .setSettings(settings) .addMapping("type", "location", "type=geo_shape")); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", geoShapeQuery("location", ShapeBuilders.newEnvelope(new Coordinate(0d, 50d), new Coordinate(2d, 40d)))).endObject()) .get(); refresh(); @@ -1878,7 +1788,7 @@ public class PercolatorIT extends ESIntegTestCase { assertAcked(prepareCreate("index").addMapping("mapping", mapping)); try { - client().prepareIndex("index", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("index", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", nestedQuery("nested", matchQuery("nested.name", "value")).innerHit(new QueryInnerHits())).endObject()) .execute().actionGet(); fail("Expected a parse error, because inner_hits isn't supported in the percolate api"); @@ -1893,14 +1803,14 @@ public class PercolatorIT extends ESIntegTestCase { // the percolate api assertAcked(prepareCreate("index").addMapping("child", "_parent", "type=parent").addMapping("parent")); - client().prepareIndex("index", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("index", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", hasChildQuery("child", matchAllQuery())).endObject()) .execute().actionGet(); } public void testPercolateDocumentWithParentField() throws Exception { assertAcked(prepareCreate("index").addMapping("child", "_parent", "type=parent").addMapping("parent")); - client().prepareIndex("index", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("index", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); refresh(); @@ -1915,7 +1825,7 @@ public class PercolatorIT extends ESIntegTestCase { } public void testFilterByNow() throws Exception { - client().prepareIndex("index", PercolatorService.TYPE_NAME, "1") + client().prepareIndex("index", PercolatorFieldMapper.TYPE_NAME, "1") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("created", "2015-07-10T14:41:54+0000").endObject()) .get(); refresh(); diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorServiceTests.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorServiceTests.java deleted file mode 100644 index 05a4a156a01..00000000000 --- a/core/src/test/java/org/elasticsearch/percolator/PercolatorServiceTests.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * 
license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.percolator; - -import org.apache.lucene.analysis.core.WhitespaceAnalyzer; -import org.apache.lucene.document.FieldType; -import org.apache.lucene.document.StoredField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.Term; -import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.TopDocs; -import org.apache.lucene.store.Directory; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; -import org.elasticsearch.action.percolate.PercolateShardResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.index.percolator.ExtractQueryTermsService; -import org.elasticsearch.index.percolator.PercolatorFieldMapper; -import org.elasticsearch.index.percolator.PercolatorQueriesRegistry; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.ContextIndexSearcher; -import org.elasticsearch.test.ESTestCase; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; - -import static org.hamcrest.Matchers.equalTo; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class PercolatorServiceTests extends ESTestCase { - - private Directory directory; - private IndexWriter indexWriter; - private DirectoryReader directoryReader; - - @Before - public void init() throws Exception { - directory = newDirectory(); - IndexWriterConfig config = new IndexWriterConfig(new WhitespaceAnalyzer()); - config.setMergePolicy(NoMergePolicy.INSTANCE); - indexWriter = new IndexWriter(directory, config); - } - - @After - public void destroy() throws Exception { - directoryReader.close(); - directory.close(); - } - - public void testCount() throws Exception { - PercolateContext context = mock(PercolateContext.class); - when(context.shardTarget()).thenReturn(new SearchShardTarget("_id", new Index("_index", "_na_"), 0)); - when(context.percolatorTypeFilter()).thenReturn(new 
MatchAllDocsQuery()); - when(context.isOnlyCount()).thenReturn(true); - IndexShard shard = mock(IndexShard.class); - when(shard.shardId()).thenReturn(new ShardId("_index", "_na_", 0)); - when(context.indexShard()).thenReturn(shard); - - PercolatorQueriesRegistry registry = createRegistry(); - addPercolatorQuery("1", new TermQuery(new Term("field", "brown")), indexWriter, registry); - addPercolatorQuery("2", new TermQuery(new Term("field", "fox")), indexWriter, registry); - addPercolatorQuery("3", new TermQuery(new Term("field", "monkey")), indexWriter, registry); - - indexWriter.close(); - directoryReader = DirectoryReader.open(directory); - IndexSearcher shardSearcher = newSearcher(directoryReader); - when(context.searcher()).thenReturn(new ContextIndexSearcher(new Engine.Searcher("test", shardSearcher), shardSearcher.getQueryCache(), shardSearcher.getQueryCachingPolicy())); - - MemoryIndex memoryIndex = new MemoryIndex(); - memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer()); - IndexSearcher percolateSearcher = memoryIndex.createSearcher(); - when(context.docSearcher()).thenReturn(percolateSearcher); - - PercolateShardResponse response = PercolatorService.doPercolate(context, registry, null, null, null); - assertThat(response.topDocs().totalHits, equalTo(2)); - } - - public void testTopMatching() throws Exception { - PercolateContext context = mock(PercolateContext.class); - when(context.shardTarget()).thenReturn(new SearchShardTarget("_id", new Index("_index", "_na_"), 0)); - when(context.percolatorTypeFilter()).thenReturn(new MatchAllDocsQuery()); - when(context.size()).thenReturn(10); - IndexShard shard = mock(IndexShard.class); - when(shard.shardId()).thenReturn(new ShardId("_index", "_na_", 0)); - when(context.indexShard()).thenReturn(shard); - - PercolatorQueriesRegistry registry = createRegistry(); - addPercolatorQuery("1", new TermQuery(new Term("field", "brown")), indexWriter, registry); - addPercolatorQuery("2", new TermQuery(new Term("field", "monkey")), indexWriter, registry); - addPercolatorQuery("3", new TermQuery(new Term("field", "fox")), indexWriter, registry); - - indexWriter.close(); - directoryReader = DirectoryReader.open(directory); - IndexSearcher shardSearcher = newSearcher(directoryReader); - when(context.searcher()).thenReturn(new ContextIndexSearcher(new Engine.Searcher("test", shardSearcher), shardSearcher.getQueryCache(), shardSearcher.getQueryCachingPolicy())); - - MemoryIndex memoryIndex = new MemoryIndex(); - memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer()); - IndexSearcher percolateSearcher = memoryIndex.createSearcher(); - when(context.docSearcher()).thenReturn(percolateSearcher); - - PercolateShardResponse response = PercolatorService.doPercolate(context, registry, null, null, null); - TopDocs topDocs = response.topDocs(); - assertThat(topDocs.totalHits, equalTo(2)); - assertThat(topDocs.scoreDocs.length, equalTo(2)); - assertThat(topDocs.scoreDocs[0].doc, equalTo(0)); - assertThat(topDocs.scoreDocs[1].doc, equalTo(2)); - } - - void addPercolatorQuery(String id, Query query, IndexWriter writer, PercolatorQueriesRegistry registry) throws IOException { - registry.getPercolateQueries().put(new BytesRef(id), query); - ParseContext.Document document = new ParseContext.Document(); - FieldType extractedQueryTermsFieldType = new FieldType(); - extractedQueryTermsFieldType.setTokenized(false); - extractedQueryTermsFieldType.setIndexOptions(IndexOptions.DOCS); 
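The deleted unit tests above drive percolation's core primitive directly: the candidate document is indexed into an in-memory Lucene index, and each registered query is then executed against that single-document index. A stripped-down sketch of that mechanism, using only plain Lucene classes already imported by this test (the field text and query are illustrative; assume a test method that may throw IOException):

    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.memory.MemoryIndex;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.TermQuery;

    // The document to percolate lives only in a throwaway RAM index.
    MemoryIndex memoryIndex = new MemoryIndex();
    memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog",
            new WhitespaceAnalyzer());
    IndexSearcher percolateSearcher = memoryIndex.createSearcher();

    // A registered percolator query "matches" if it hits that one document.
    boolean matches = percolateSearcher.count(new TermQuery(new Term("field", "fox"))) > 0; // true here
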
- extractedQueryTermsFieldType.freeze(); - ExtractQueryTermsService.extractQueryTerms(query, document, PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME, PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME, extractedQueryTermsFieldType); - document.add(new StoredField(UidFieldMapper.NAME, Uid.createUid(PercolatorService.TYPE_NAME, id))); - writer.addDocument(document); - } - - PercolatorQueriesRegistry createRegistry() { - Index index = new Index("_index", "_na_"); - IndexSettings indexSettings = new IndexSettings(new IndexMetaData.Builder("_index").settings( - Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) - .build(), Settings.EMPTY); - return new PercolatorQueriesRegistry( - new ShardId(index, 0), - indexSettings, - null - ); - } - -} diff --git a/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorIT.java deleted file mode 100644 index a08eb41236d..00000000000 --- a/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorIT.java +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.percolator; - -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; -import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder; -import org.elasticsearch.action.percolate.MultiPercolateResponse; -import org.elasticsearch.action.percolate.PercolateRequestBuilder; -import org.elasticsearch.action.percolate.PercolateResponse; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Requests; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; - -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Predicate; - -import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder; -import static org.elasticsearch.client.Requests.clusterHealthRequest; -import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; -import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.percolator.PercolatorTestUtil.convertFromTextArray; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.arrayContainingInAnyOrder; -import static org.hamcrest.Matchers.arrayWithSize; -import static org.hamcrest.Matchers.emptyArray; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; - -@ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0) -public class RecoveryPercolatorIT extends ESIntegTestCase { - @Override - protected int numberOfShards() { - return 1; - } - - public void testRestartNodePercolator1() throws Exception { - internalCluster().startNode(); - assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=text").addMapping(PercolatorService.TYPE_NAME, "color", "type=text")); - - logger.info("--> register a query"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku") - .setSource(jsonBuilder().startObject() - .field("color", "blue") - .field("query", termQuery("field1", "value1")) - .endObject()) - .setRefresh(true) - .get(); - - PercolateResponse percolate = client().preparePercolate() - .setIndices("test").setDocumentType("type1") - .setSource(jsonBuilder().startObject().startObject("doc") - .field("field1", "value1") - .endObject().endObject()) - .get(); - assertThat(percolate.getMatches(), arrayWithSize(1)); - - internalCluster().rollingRestart(); - - logger.info("Running Cluster Health (wait for the 
shards to startup)"); - ensureYellow(); - - percolate = client().preparePercolate() - .setIndices("test").setDocumentType("type1") - .setSource(jsonBuilder().startObject().startObject("doc") - .field("field1", "value1") - .endObject().endObject()) - .get(); - assertMatchCount(percolate, 1L); - assertThat(percolate.getMatches(), arrayWithSize(1)); - } - - public void testRestartNodePercolator2() throws Exception { - internalCluster().startNode(); - assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=text").addMapping(PercolatorService.TYPE_NAME, "color", "type=text")); - - logger.info("--> register a query"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku") - .setSource(jsonBuilder().startObject() - .field("color", "blue") - .field("query", termQuery("field1", "value1")) - .endObject()) - .setRefresh(true) - .get(); - - assertThat(client().prepareSearch().setSize(0).setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get().getHits().totalHits(), equalTo(1L)); - - PercolateResponse percolate = client().preparePercolate() - .setIndices("test").setDocumentType("type1") - .setSource(jsonBuilder().startObject().startObject("doc") - .field("field1", "value1") - .endObject().endObject()) - .get(); - assertMatchCount(percolate, 1L); - assertThat(percolate.getMatches(), arrayWithSize(1)); - - internalCluster().rollingRestart(); - - logger.info("Running Cluster Health (wait for the shards to startup)"); - ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); - assertThat(clusterHealth.isTimedOut(), equalTo(false)); - SearchResponse countResponse = client().prepareSearch().setSize(0).setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get(); - assertHitCount(countResponse, 1L); - - DeleteIndexResponse actionGet = client().admin().indices().prepareDelete("test").get(); - assertThat(actionGet.isAcknowledged(), equalTo(true)); - assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=text").addMapping(PercolatorService.TYPE_NAME, "color", "type=text")); - clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); - assertThat(clusterHealth.isTimedOut(), equalTo(false)); - assertThat(client().prepareSearch().setSize(0).setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get().getHits().totalHits(), equalTo(0L)); - - percolate = client().preparePercolate() - .setIndices("test").setDocumentType("type1") - .setSource(jsonBuilder().startObject().startObject("doc") - .field("field1", "value1") - .endObject().endObject()) - .get(); - assertMatchCount(percolate, 0L); - assertThat(percolate.getMatches(), emptyArray()); - - logger.info("--> register a query"); - client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku") - .setSource(jsonBuilder().startObject() - .field("color", "blue") - .field("query", termQuery("field1", "value1")) - .endObject()) - .setRefresh(true) - .get(); - - assertThat(client().prepareSearch().setSize(0).setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get().getHits().totalHits(), equalTo(1L)); - - percolate = client().preparePercolate() - .setIndices("test").setDocumentType("type1") - .setSource(jsonBuilder().startObject().startObject("doc") - .field("field1", 
"value1") - .endObject().endObject()) - .get(); - assertMatchCount(percolate, 1L); - assertThat(percolate.getMatches(), arrayWithSize(1)); - } - - public void testLoadingPercolateQueriesDuringCloseAndOpen() throws Exception { - internalCluster().startNode(); - internalCluster().startNode(); - - assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1))); - ensureGreen(); - - logger.info("--> Add dummy docs"); - client().prepareIndex("test", "type1", "1").setSource("field1", 0).get(); - client().prepareIndex("test", "type2", "1").setSource("field1", 1).get(); - - logger.info("--> register a queries"); - for (int i = 1; i <= 100; i++) { - client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) - .setSource(jsonBuilder().startObject() - .field("query", rangeQuery("field1").from(0).to(i)) - .endObject()) - .get(); - } - refresh(); - - logger.info("--> Percolate doc with field1=95"); - PercolateResponse response = client().preparePercolate() - .setIndices("test").setDocumentType("type1") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", 95).endObject().endObject()) - .get(); - assertMatchCount(response, 6L); - assertThat(response.getMatches(), arrayWithSize(6)); - assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("95", "96", "97", "98", "99", "100")); - - logger.info("--> Close and open index to trigger percolate queries loading..."); - assertAcked(client().admin().indices().prepareClose("test")); - assertAcked(client().admin().indices().prepareOpen("test")); - ensureGreen(); - - logger.info("--> Percolate doc with field1=100"); - response = client().preparePercolate() - .setIndices("test").setDocumentType("type1") - .setSource(jsonBuilder().startObject().startObject("doc").field("field1", 100).endObject().endObject()).get(); - - assertMatchCount(response, 1L); - assertThat(response.getMatches(), arrayWithSize(1)); - assertThat(response.getMatches()[0].getId().string(), equalTo("100")); - } - - public void testPercolatorRecovery() throws Exception { - // 3 nodes, 2 primary + 2 replicas per primary, so each node should have a copy of the data. - // We only start and stop nodes 2 and 3, so all requests should succeed and never be partial. 
- internalCluster().startNode(settingsBuilder().put("node.stay", true)); - internalCluster().startNode(settingsBuilder().put("node.stay", false)); - internalCluster().startNode(settingsBuilder().put("node.stay", false)); - ensureGreen(); - client().admin().indices().prepareCreate("test") - .setSettings(settingsBuilder() - .put("index.number_of_shards", 2) - .put("index.number_of_replicas", 2) - ) - .get(); - ensureGreen(); - - final Client client = internalCluster().client(input -> input.getAsBoolean("node.stay", true)); - final int numQueries = randomIntBetween(50, 100); - logger.info("--> register queries"); - for (int i = 0; i < numQueries; i++) { - client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) - .get(); - } - refresh(); - - final String document = "{\"field\" : \"a\"}"; - client.prepareIndex("test", "type", "1") - .setSource(document) - .get(); - - final Lock lock = new ReentrantLock(); - final AtomicBoolean run = new AtomicBoolean(true); - final AtomicReference<Throwable> error = new AtomicReference<>(); - Runnable r = () -> { - try { - while (run.get()) { - PercolateRequestBuilder percolateBuilder = client.preparePercolate() - .setIndices("test").setDocumentType("type").setSize(numQueries); - if (randomBoolean()) { - percolateBuilder.setPercolateDoc(docBuilder().setDoc(document)); - } else { - percolateBuilder.setGetRequest(Requests.getRequest("test").type("type").id("1")); - } - PercolateResponse response; - try { - lock.lock(); - response = percolateBuilder.get(); - } finally { - lock.unlock(); - } - assertNoFailures(response); - assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards())); - assertThat(response.getCount(), equalTo((long) numQueries)); - assertThat(response.getMatches().length, equalTo(numQueries)); - } - } catch (Throwable t) { - logger.info("Error in percolate thread...", t); - run.set(false); - error.set(t); - } - }; - Thread t = new Thread(r); - t.start(); - Predicate<Settings> nodePredicate = input -> !input.getAsBoolean("node.stay", false); - try { - // 1 index, 2 primaries, 2 replicas per primary - for (int i = 0; i < 4; i++) { - try { - lock.lock(); - internalCluster().stopRandomNode(nodePredicate); - } finally { - lock.unlock(); - } - client.admin().cluster().prepareHealth("test") - .setWaitForEvents(Priority.LANGUID) - .setTimeout(TimeValue.timeValueMinutes(2)) - .setWaitForYellowStatus() - .setWaitForActiveShards(4) // 2 nodes, so 4 shards (2 primaries, 2 replicas) - .get(); - assertThat(error.get(), nullValue()); - try { - lock.lock(); - internalCluster().stopRandomNode(nodePredicate); - } finally { - lock.unlock(); - } - client.admin().cluster().prepareHealth("test") - .setWaitForEvents(Priority.LANGUID) - .setTimeout(TimeValue.timeValueMinutes(2)) - .setWaitForYellowStatus() - .setWaitForActiveShards(2) // 1 node, so 2 shards (2 primaries, 0 replicas) - .get(); - assertThat(error.get(), nullValue()); - internalCluster().startNode(settingsBuilder().put("node.stay", false)); - client.admin().cluster().prepareHealth("test") - .setWaitForEvents(Priority.LANGUID) - .setTimeout(TimeValue.timeValueMinutes(2)) - .setWaitForYellowStatus() - .setWaitForActiveShards(4) // 2 nodes, so 4 shards (2 primaries, 2 replicas) - .get(); - assertThat(error.get(), nullValue()); - internalCluster().startNode(settingsBuilder().put("node.stay", false)); - client.admin().cluster().prepareHealth("test") - .setWaitForEvents(Priority.LANGUID) -
.setTimeout(TimeValue.timeValueMinutes(2)) - .setWaitForGreenStatus() // We're confirm the shard settings, so green instead of yellow - .setWaitForActiveShards(6) // 3 nodes, so 6 shards (2 primaries, 4 replicas) - .get(); - assertThat(error.get(), nullValue()); - } - } finally { - run.set(false); - } - t.join(); - assertThat(error.get(), nullValue()); - } - -} diff --git a/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorIT.java deleted file mode 100644 index 52f8ecb4b13..00000000000 --- a/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorIT.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.percolator; - -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.percolate.PercolateResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.AlreadyExpiredException; -import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.hamcrest.Matchers; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.percolator.PercolatorTestUtil.convertFromTextArray; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.emptyArray; -import static org.hamcrest.Matchers.equalTo; - -/** - */ -@ClusterScope(scope = ESIntegTestCase.Scope.TEST) -public class TTLPercolatorIT extends ESIntegTestCase { - private static final long PURGE_INTERVAL = 200; - - @Override - protected void beforeIndexDeletion() { - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .put("indices.ttl.interval", PURGE_INTERVAL, TimeUnit.MILLISECONDS) - .build(); - } - - public void testPercolatingWithTimeToLive() throws Exception { - final Client client = client(); - ensureGreen(); - - String percolatorMapping = XContentFactory.jsonBuilder().startObject().startObject(PercolatorService.TYPE_NAME) - .startObject("_ttl").field("enabled", true).endObject() - .startObject("_timestamp").field("enabled", 
true).endObject() - .endObject().endObject().string(); - - String typeMapping = XContentFactory.jsonBuilder().startObject().startObject("type1") - .startObject("_ttl").field("enabled", true).endObject() - .startObject("_timestamp").field("enabled", true).endObject() - .startObject("properties").startObject("field1").field("type", "text").endObject().endObject() - .endObject().endObject().string(); - - client.admin().indices().prepareCreate("test") - .setSettings(settingsBuilder().put("index.number_of_shards", 2)) - .addMapping(PercolatorService.TYPE_NAME, percolatorMapping) - .addMapping("type1", typeMapping) - .execute().actionGet(); - ensureGreen(); - - final NumShards test = getNumShards("test"); - - long ttl = 1500; - long now = System.currentTimeMillis(); - client.prepareIndex("test", PercolatorService.TYPE_NAME, "kuku").setSource(jsonBuilder() - .startObject() - .startObject("query") - .startObject("term") - .field("field1", "value1") - .endObject() - .endObject() - .endObject() - ).setRefresh(true).setTTL(ttl).execute().actionGet(); - - IndicesStatsResponse response = client.admin().indices().prepareStats("test") - .clear().setIndexing(true) - .execute().actionGet(); - assertThat(response.getIndices().get("test").getTotal().getIndexing().getTotal().getIndexCount(), equalTo((long)test.dataCopies)); - - PercolateResponse percolateResponse = client.preparePercolate() - .setIndices("test").setDocumentType("type1") - .setSource(jsonBuilder() - .startObject() - .startObject("doc") - .field("field1", "value1") - .endObject() - .endObject() - ).execute().actionGet(); - assertNoFailures(percolateResponse); - if (percolateResponse.getMatches().length == 0) { - // OK, ttl + purgeInterval has passed (slow machine or many other tests were running at the same time - GetResponse getResponse = client.prepareGet("test", PercolatorService.TYPE_NAME, "kuku").execute().actionGet(); - assertThat(getResponse.isExists(), equalTo(false)); - response = client.admin().indices().prepareStats("test") - .clear().setIndexing(true) - .execute().actionGet(); - long currentDeleteCount = response.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount(); - assertThat(currentDeleteCount, equalTo((long)test.dataCopies)); - return; - } - - assertThat(convertFromTextArray(percolateResponse.getMatches(), "test"), arrayContaining("kuku")); - long timeSpent = System.currentTimeMillis() - now; - long waitTime = ttl + PURGE_INTERVAL - timeSpent; - if (waitTime >= 0) { - Thread.sleep(waitTime); // Doesn't make sense to check the deleteCount before ttl has expired - } - - // See comment in SimpleTTLTests - logger.info("Checking if the ttl purger has run"); - assertTrue(awaitBusy(() -> { - IndicesStatsResponse indicesStatsResponse = client.admin().indices().prepareStats("test").clear().setIndexing(true).get(); - // TTL deletes one doc, but it is indexed in the primary shard and replica shards - return indicesStatsResponse.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount() == test.dataCopies; - }, 5, TimeUnit.SECONDS)); - - percolateResponse = client.preparePercolate() - .setIndices("test").setDocumentType("type1") - .setSource(jsonBuilder() - .startObject() - .startObject("doc") - .field("field1", "value1") - .endObject() - .endObject() - ).execute().actionGet(); - assertMatchCount(percolateResponse, 0L); - assertThat(percolateResponse.getMatches(), emptyArray()); - } - - public void testEnsureTTLDoesNotCreateIndex() throws IOException, InterruptedException { - ensureGreen(); - 
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() - .put("indices.ttl.interval", 60, TimeUnit.SECONDS) // 60 sec - .build()).get(); - - String typeMapping = XContentFactory.jsonBuilder().startObject().startObject("type1") - .startObject("_ttl").field("enabled", true).endObject() - .endObject().endObject().string(); - - client().admin().indices().prepareCreate("test") - .setSettings(settingsBuilder().put("index.number_of_shards", 1)) - .addMapping("type1", typeMapping) - .execute().actionGet(); - ensureGreen(); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() - .put("indices.ttl.interval", 1, TimeUnit.SECONDS) - .build()).get(); - - for (int i = 0; i < 100; i++) { - logger.debug("index doc {} ", i); - try { - client().prepareIndex("test", "type1", "" + i).setSource(jsonBuilder() - .startObject() - .startObject("query") - .startObject("term") - .field("field1", "value1") - .endObject() - .endObject() - .endObject() - ).setTTL(randomIntBetween(1, 500)).setRefresh(true).execute().actionGet(); - } catch (MapperParsingException e) { - logger.info("failed indexing {}", i, e); - // if we are unlucky, the TTL is so small that the expiry date is already in the past when - // we parse the doc; ignore those... - assertThat(e.getCause(), Matchers.instanceOf(AlreadyExpiredException.class)); - } - - } - refresh(); - assertTrue(awaitBusy(() -> { - IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("test").clear().setIndexing(true).get(); - logger.debug("delete count [{}]", indicesStatsResponse.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount()); - // TTL deletes one doc, but it is indexed in the primary shard and replica shards - return indicesStatsResponse.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount() != 0; - }, 5, TimeUnit.SECONDS)); - internalCluster().wipeIndices("test"); - client().admin().indices().prepareCreate("test") - .addMapping("type1", typeMapping) - .execute().actionGet(); - } -} diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginCliTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginCliTests.java deleted file mode 100644 index 3a121590083..00000000000 --- a/core/src/test/java/org/elasticsearch/plugins/PluginCliTests.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.plugins; - -import org.elasticsearch.common.cli.CliToolTestCase; - -import static org.elasticsearch.common.cli.CliTool.ExitStatus.OK_AND_EXIT; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.is; - -public class PluginCliTests extends CliToolTestCase { - public void testHelpWorks() throws Exception { - CliToolTestCase.CaptureOutputTerminal terminal = new CliToolTestCase.CaptureOutputTerminal(); - assertThat(new PluginCli(terminal).execute(args("--help")), is(OK_AND_EXIT)); - assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin.help"); - - terminal.getTerminalOutput().clear(); - assertThat(new PluginCli(terminal).execute(args("install -h")), is(OK_AND_EXIT)); - assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin-install.help"); - for (String plugin : InstallPluginCommand.OFFICIAL_PLUGINS) { - assertThat(terminal.getTerminalOutput(), hasItem(containsString(plugin))); - } - - terminal.getTerminalOutput().clear(); - assertThat(new PluginCli(terminal).execute(args("remove --help")), is(OK_AND_EXIT)); - assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin-remove.help"); - - terminal.getTerminalOutput().clear(); - assertThat(new PluginCli(terminal).execute(args("list -h")), is(OK_AND_EXIT)); - assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin-list.help"); - } -} diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java index 37a0f4e358e..04bff31057d 100644 --- a/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java @@ -176,12 +176,12 @@ public class PluginInfoTests extends ESTestCase { "description", "fake desc", "name", "my_plugin", "version", "1.0", - "elasticsearch.version", Version.V_1_7_0.toString()); + "elasticsearch.version", Version.V_2_0_0.toString()); try { PluginInfo.readFromProperties(pluginDir); fail("expected old elasticsearch version exception"); } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("Was designed for version [1.7.0]")); + assertTrue(e.getMessage().contains("Was designed for version [2.0.0]")); } } diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java index a47217e3048..4b514763f72 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java +++ b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java @@ -165,7 +165,7 @@ public class RecoveriesCollectionTests extends ESSingleNodeTestCase { long startRecovery(RecoveriesCollection collection, RecoveryTargetService.RecoveryListener listener, TimeValue timeValue) { IndicesService indexServices = getInstanceFromNode(IndicesService.class); - IndexShard indexShard = indexServices.indexServiceSafe("test").getShardOrNull(0); + IndexShard indexShard = indexServices.indexServiceSafe(resolveIndex("test")).getShardOrNull(0); final DiscoveryNode sourceNode = new DiscoveryNode("id", DummyTransportAddress.INSTANCE, Version.CURRENT); return collection.startRecovery(indexShard, sourceNode, listener, timeValue); } diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java 
b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index 1fd44959a59..7e455913151 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -27,13 +27,13 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 16f27655055..b441dd32c78 100644 --- a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -123,7 +123,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { if (action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) { RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; - logger.debug("file chunk [" + req.toString() + "] lastChunk: " + req.lastChunk()); + logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk()); if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) { latch.countDown(); throw new RuntimeException("Caused some truncated files for fun and profit"); diff --git a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeActionTests.java new file mode 100644 index 00000000000..34e8315372b --- /dev/null +++ b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeActionTests.java @@ -0,0 +1,98 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.rest.action.admin.indices.analyze; + +import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.startsWith; + +public class RestAnalyzeActionTests extends ESTestCase { + + public void testParseXContentForAnalyzeRequest() throws Exception { + BytesReference content = XContentFactory.jsonBuilder() + .startObject() + .field("text", "THIS IS A TEST") + .field("tokenizer", "keyword") + .array("filters", "lowercase") + .endObject().bytes(); + + AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); + + RestAnalyzeAction.buildFromContent(content, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)); + + assertThat(analyzeRequest.text().length, equalTo(1)); + assertThat(analyzeRequest.text(), equalTo(new String[]{"THIS IS A TEST"})); + assertThat(analyzeRequest.tokenizer(), equalTo("keyword")); + assertThat(analyzeRequest.tokenFilters(), equalTo(new String[]{"lowercase"})); + } + + public void testParseXContentForAnalyzeRequestWithInvalidJsonThrowsException() throws Exception { + AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); + + try { + RestAnalyzeAction.buildFromContent(new BytesArray("{invalid_json}"), analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)); + fail("shouldn't get here"); + } catch (Exception e) { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), equalTo("Failed to parse request body")); + } + } + + public void testParseXContentForAnalyzeRequestWithUnknownParamThrowsException() throws Exception { + AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); + BytesReference invalidContent = XContentFactory.jsonBuilder() + .startObject() + .field("text", "THIS IS A TEST") + .field("unknown", "keyword") + .endObject().bytes(); + + try { + RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)); + fail("shouldn't get here"); + } catch (Exception e) { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]")); + } + } + + public void testParseXContentForAnalyzeRequestWithInvalidStringExplainParamThrowsException() throws Exception { + AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); + BytesReference invalidExplain = XContentFactory.jsonBuilder() + .startObject() + .field("explain", "fals") + .endObject().bytes(); + try { + RestAnalyzeAction.buildFromContent(invalidExplain, analyzeRequest, new ParseFieldMatcher(Settings.EMPTY)); + fail("shouldn't get here"); + } catch (Exception e) { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), startsWith("explain must be either 'true' or 'false'")); + } + } + + +} diff --git a/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java new file mode 100644 index 00000000000..848c62ab2b4 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java @@ -0,0 +1,187 @@ +/* + * 
Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.cat; + +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; +import org.elasticsearch.cluster.metadata.SnapshotId; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RestoreSource; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.Table; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.mock.orig.Mockito.when; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.mockito.Mockito.mock; + +public class RestRecoveryActionTests extends ESTestCase { + + public void testRestRecoveryAction() { + final Settings settings = Settings.EMPTY; + final RestController restController = new RestController(settings); + final RestRecoveryAction action = new RestRecoveryAction(settings, restController, restController, null); + final int totalShards = randomIntBetween(1, 32); + final int successfulShards = Math.max(0, totalShards - randomIntBetween(1, 2)); + final int failedShards = totalShards - successfulShards; + final boolean detailed = randomBoolean(); + final Map<String, List<RecoveryState>> shardRecoveryStates = new HashMap<>(); + final List<RecoveryState> recoveryStates = new ArrayList<>(); + + for (int i = 0; i < successfulShards; i++) { + final RecoveryState state = mock(RecoveryState.class); + when(state.getShardId()).thenReturn(new ShardId(new Index("index", "_na_"), i)); + final RecoveryState.Timer timer = mock(RecoveryState.Timer.class); + when(timer.time()).thenReturn((long)randomIntBetween(1000000, 10 * 1000000)); + when(state.getTimer()).thenReturn(timer); + when(state.getType()).thenReturn(randomFrom(RecoveryState.Type.values())); + when(state.getStage()).thenReturn(randomFrom(RecoveryState.Stage.values())); + final DiscoveryNode sourceNode = randomBoolean() ? 
mock(DiscoveryNode.class) : null; + if (sourceNode != null) { + when(sourceNode.getHostName()).thenReturn(randomAsciiOfLength(8)); + } + when(state.getSourceNode()).thenReturn(sourceNode); + final DiscoveryNode targetNode = mock(DiscoveryNode.class); + when(targetNode.getHostName()).thenReturn(randomAsciiOfLength(8)); + when(state.getTargetNode()).thenReturn(targetNode); + + final RestoreSource restoreSource = randomBoolean() ? mock(RestoreSource.class) : null; + if (restoreSource != null) { + final SnapshotId snapshotId = mock(SnapshotId.class); + when(snapshotId.getRepository()).thenReturn(randomAsciiOfLength(8)); + when(snapshotId.getSnapshot()).thenReturn(randomAsciiOfLength(8)); + when(restoreSource.snapshotId()).thenReturn(snapshotId); + } + + RecoveryState.Index index = mock(RecoveryState.Index.class); + + final int totalRecoveredFiles = randomIntBetween(1, 64); + when(index.totalRecoverFiles()).thenReturn(totalRecoveredFiles); + final int recoveredFileCount = randomIntBetween(0, totalRecoveredFiles); + when(index.recoveredFileCount()).thenReturn(recoveredFileCount); + when(index.recoveredFilesPercent()).thenReturn((100f * recoveredFileCount) / totalRecoveredFiles); + when(index.totalFileCount()).thenReturn(randomIntBetween(totalRecoveredFiles, 2 * totalRecoveredFiles)); + + final int totalRecoveredBytes = randomIntBetween(1, 1 << 24); + when(index.totalRecoverBytes()).thenReturn((long)totalRecoveredBytes); + final int recoveredBytes = randomIntBetween(0, totalRecoveredBytes); + when(index.recoveredBytes()).thenReturn((long)recoveredBytes); + when(index.recoveredBytesPercent()).thenReturn((100f * recoveredBytes) / totalRecoveredBytes); + when(index.totalRecoverBytes()).thenReturn((long)randomIntBetween(totalRecoveredBytes, 2 * totalRecoveredBytes)); + when(state.getIndex()).thenReturn(index); + + final RecoveryState.Translog translog = mock(RecoveryState.Translog.class); + final int translogOps = randomIntBetween(0, 1 << 18); + when(translog.totalOperations()).thenReturn(translogOps); + final int translogOpsRecovered = randomIntBetween(0, translogOps); + when(translog.recoveredOperations()).thenReturn(translogOpsRecovered); + when(translog.recoveredPercent()).thenReturn(translogOps == 0 ? 
100f : (100f * translogOpsRecovered / translogOps)); + when(state.getTranslog()).thenReturn(translog); + + recoveryStates.add(state); + } + + final List<RecoveryState> shuffle = new ArrayList<>(recoveryStates); + Randomness.shuffle(shuffle); + shardRecoveryStates.put("index", shuffle); + + final List<ShardOperationFailedException> shardFailures = new ArrayList<>(); + final RecoveryResponse response = new RecoveryResponse( + totalShards, + successfulShards, + failedShards, + detailed, + shardRecoveryStates, + shardFailures); + final Table table = action.buildRecoveryTable(null, response); + + assertNotNull(table); + + List<Table.Cell> headers = table.getHeaders(); + assertThat(headers.get(0).value, equalTo("index")); + assertThat(headers.get(1).value, equalTo("shard")); + assertThat(headers.get(2).value, equalTo("time")); + assertThat(headers.get(3).value, equalTo("type")); + assertThat(headers.get(4).value, equalTo("stage")); + assertThat(headers.get(5).value, equalTo("source_host")); + assertThat(headers.get(6).value, equalTo("target_host")); + assertThat(headers.get(7).value, equalTo("repository")); + assertThat(headers.get(8).value, equalTo("snapshot")); + assertThat(headers.get(9).value, equalTo("files")); + assertThat(headers.get(10).value, equalTo("files_recovered")); + assertThat(headers.get(11).value, equalTo("files_percent")); + assertThat(headers.get(12).value, equalTo("files_total")); + assertThat(headers.get(13).value, equalTo("bytes")); + assertThat(headers.get(14).value, equalTo("bytes_recovered")); + assertThat(headers.get(15).value, equalTo("bytes_percent")); + assertThat(headers.get(16).value, equalTo("bytes_total")); + assertThat(headers.get(17).value, equalTo("translog_ops")); + assertThat(headers.get(18).value, equalTo("translog_ops_recovered")); + assertThat(headers.get(19).value, equalTo("translog_ops_percent")); + + assertThat(table.getRows().size(), equalTo(successfulShards)); + for (int i = 0; i < successfulShards; i++) { + final RecoveryState state = recoveryStates.get(i); + List<Table.Cell> cells = table.getRows().get(i); + assertThat(cells.get(0).value, equalTo("index")); + assertThat(cells.get(1).value, equalTo(i)); + assertThat(cells.get(2).value, equalTo(new TimeValue(state.getTimer().time()))); + assertThat(cells.get(3).value, equalTo(state.getType().name().toLowerCase(Locale.ROOT))); + assertThat(cells.get(4).value, equalTo(state.getStage().name().toLowerCase(Locale.ROOT))); + assertThat(cells.get(5).value, equalTo(state.getSourceNode() == null ? "n/a" : state.getSourceNode().getHostName())); + assertThat(cells.get(6).value, equalTo(state.getTargetNode().getHostName())); + assertThat( + cells.get(7).value, + equalTo(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshotId().getRepository())); + assertThat( + cells.get(8).value, + equalTo(state.getRestoreSource() == null ? 
"n/a" : state.getRestoreSource().snapshotId().getSnapshot())); + assertThat(cells.get(9).value, equalTo(state.getIndex().totalRecoverFiles())); + assertThat(cells.get(10).value, equalTo(state.getIndex().recoveredFileCount())); + assertThat(cells.get(11).value, equalTo(percent(state.getIndex().recoveredFilesPercent()))); + assertThat(cells.get(12).value, equalTo(state.getIndex().totalFileCount())); + assertThat(cells.get(13).value, equalTo(state.getIndex().totalRecoverBytes())); + assertThat(cells.get(14).value, equalTo(state.getIndex().recoveredBytes())); + assertThat(cells.get(15).value, equalTo(percent(state.getIndex().recoveredBytesPercent()))); + assertThat(cells.get(16).value, equalTo(state.getIndex().totalBytes())); + assertThat(cells.get(17).value, equalTo(state.getTranslog().totalOperations())); + assertThat(cells.get(18).value, equalTo(state.getTranslog().recoveredOperations())); + assertThat(cells.get(19).value, equalTo(percent(state.getTranslog().recoveredPercent()))); + } + } + + private static String percent(float percent) { + return String.format(Locale.ROOT, "%1.1f%%", percent); + } + +} diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 0825da4d4df..a369b44e2b1 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -122,26 +122,21 @@ public class ScriptServiceTests extends ESTestCase { } public void testScriptsWithoutExtensions() throws IOException { - buildScriptService(Settings.EMPTY); - logger.info("--> setup two test files one with extension and another without"); Path testFileNoExt = scriptsFilePath.resolve("test_no_ext"); Path testFileWithExt = scriptsFilePath.resolve("test_script.tst"); Streams.copy("test_file_no_ext".getBytes("UTF-8"), Files.newOutputStream(testFileNoExt)); Streams.copy("test_file".getBytes("UTF-8"), Files.newOutputStream(testFileWithExt)); resourceWatcherService.notifyNow(); - logger.info("--> verify that file with extension was correctly processed"); CompiledScript compiledScript = scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH, Collections.emptyMap()); assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file")); - logger.info("--> delete both files"); Files.delete(testFileNoExt); Files.delete(testFileWithExt); resourceWatcherService.notifyNow(); - logger.info("--> verify that file with extension was correctly removed"); try { scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH, Collections.emptyMap()); @@ -151,6 +146,25 @@ public class ScriptServiceTests extends ESTestCase { } } + public void testScriptCompiledOnceHiddenFileDetected() throws IOException { + buildScriptService(Settings.EMPTY); + + Path testHiddenFile = scriptsFilePath.resolve(".hidden_file"); + Streams.copy("test_hidden_file".getBytes("UTF-8"), Files.newOutputStream(testHiddenFile)); + + Path testFileScript = scriptsFilePath.resolve("file_script.tst"); + Streams.copy("test_file_script".getBytes("UTF-8"), Files.newOutputStream(testFileScript)); + resourceWatcherService.notifyNow(); + + CompiledScript compiledScript = scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), + ScriptContext.Standard.SEARCH, Collections.emptyMap()); + assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file_script")); + 
+ Files.delete(testHiddenFile); + Files.delete(testFileScript); + resourceWatcherService.notifyNow(); + } + public void testInlineScriptCompiledOnceCache() throws IOException { buildScriptService(Settings.EMPTY); CompiledScript compiledScript1 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), diff --git a/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java index b75d9634f69..97bed01369b 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -48,7 +48,7 @@ public class SearchModuleTests extends ModuleTestCase { } try { - module.registerSuggester("term", PhraseSuggester.class); + module.registerSuggester("term", PhraseSuggester.PROTOTYPE); } catch (IllegalArgumentException e) { assertEquals(e.getMessage(), "Can't register the same [suggester] more than once for [term]"); } @@ -56,9 +56,9 @@ public class SearchModuleTests extends ModuleTestCase { public void testRegisterSuggester() { SearchModule module = new SearchModule(Settings.EMPTY, new NamedWriteableRegistry()); - module.registerSuggester("custom", CustomSuggester.class); + module.registerSuggester("custom", CustomSuggester.PROTOTYPE); try { - module.registerSuggester("custom", CustomSuggester.class); + module.registerSuggester("custom", CustomSuggester.PROTOTYPE); } catch (IllegalArgumentException e) { assertEquals(e.getMessage(), "Can't register the same [suggester] more than once for [custom]"); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java index bc6393986b2..4cd69ef604b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java @@ -20,10 +20,10 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.inject.AbstractModule; @@ -59,7 +59,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolModule; import org.junit.AfterClass; @@ -75,6 +74,8 @@ import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.containsString; public class AggregatorParsingTests extends ESTestCase { @@ -110,8 +111,9 @@ public class AggregatorParsingTests extends ESTestCase { namedWriteableRegistry = new NamedWriteableRegistry(); index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_"); Settings indexSettings = 
Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); - final TestClusterService clusterService = new TestClusterService(); - clusterService.setState(new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder() + final ThreadPool threadPool = new ThreadPool(settings); + final ClusterService clusterService = createClusterService(threadPool); + setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder() .put(new IndexMetaData.Builder(index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0)))); SettingsModule settingsModule = new SettingsModule(settings); settingsModule.registerSetting(InternalSettingsPlugin.VERSION_CREATED); @@ -146,7 +148,7 @@ public class AggregatorParsingTests extends ESTestCase { }; scriptModule.prepareSettings(settingsModule); injector = new ModulesBuilder().add(new EnvironmentModule(new Environment(settings)), settingsModule, - new ThreadPoolModule(new ThreadPool(settings)), scriptModule, new IndicesModule() { + new ThreadPoolModule(threadPool), scriptModule, new IndicesModule() { @Override protected void configure() { @@ -186,6 +188,7 @@ public class AggregatorParsingTests extends ESTestCase { @AfterClass public static void afterClass() throws Exception { + injector.getInstance(ClusterService.class).close(); terminate(injector.getInstance(ThreadPool.class)); injector = null; index = null; @@ -196,20 +199,20 @@ public class AggregatorParsingTests extends ESTestCase { public void testTwoTypes() throws Exception { String source = JsonXContent.contentBuilder() - .startObject() - .startObject("in_stock") - .startObject("filter") - .startObject("range") - .startObject("stock") - .field("gt", 0) - .endObject() - .endObject() - .endObject() - .startObject("terms") - .field("field", "stock") - .endObject() - .endObject() - .endObject().string(); + .startObject() + .startObject("in_stock") + .startObject("filter") + .startObject("range") + .startObject("stock") + .field("gt", 0) + .endObject() + .endObject() + .endObject() + .startObject("terms") + .field("field", "stock") + .endObject() + .endObject() + .endObject().string(); try { XContentParser parser = XContentFactory.xContent(source).createParser(source); QueryParseContext parseContext = new QueryParseContext(queriesRegistry); @@ -225,27 +228,27 @@ public class AggregatorParsingTests extends ESTestCase { public void testTwoAggs() throws Exception { String source = JsonXContent.contentBuilder() - .startObject() - .startObject("by_date") - .startObject("date_histogram") - .field("field", "timestamp") - .field("interval", "month") - .endObject() - .startObject("aggs") - .startObject("tag_count") - .startObject("cardinality") - .field("field", "tag") - .endObject() - .endObject() - .endObject() - .startObject("aggs") // 2nd "aggs": illegal - .startObject("tag_count2") - .startObject("cardinality") - .field("field", "tag") - .endObject() - .endObject() - .endObject() - .endObject().string(); + .startObject() + .startObject("by_date") + .startObject("date_histogram") + .field("field", "timestamp") + .field("interval", "month") + .endObject() + .startObject("aggs") + .startObject("tag_count") + .startObject("cardinality") + .field("field", "tag") + .endObject() + .endObject() + .endObject() + .startObject("aggs") // 2nd "aggs": illegal + .startObject("tag_count2") + .startObject("cardinality") + .field("field", "tag") + .endObject() + .endObject() + .endObject() + .endObject().string(); try { XContentParser 
parser = XContentFactory.xContent(source).createParser(source); QueryParseContext parseContext = new QueryParseContext(queriesRegistry); @@ -276,16 +279,16 @@ public class AggregatorParsingTests extends ESTestCase { } String source = JsonXContent.contentBuilder() - .startObject() - .startObject(name) - .startObject("filter") - .startObject("range") - .startObject("stock") - .field("gt", 0) - .endObject() - .endObject() - .endObject() - .endObject().string(); + .startObject() + .startObject(name) + .startObject("filter") + .startObject("range") + .startObject("stock") + .field("gt", 0) + .endObject() + .endObject() + .endObject() + .endObject().string(); try { XContentParser parser = XContentFactory.xContent(source).createParser(source); QueryParseContext parseContext = new QueryParseContext(queriesRegistry); @@ -302,18 +305,18 @@ public class AggregatorParsingTests extends ESTestCase { public void testSameAggregationName() throws Exception { final String name = randomAsciiOfLengthBetween(1, 10); String source = JsonXContent.contentBuilder() - .startObject() - .startObject(name) - .startObject("terms") - .field("field", "a") - .endObject() - .endObject() - .startObject(name) - .startObject("terms") - .field("field", "b") - .endObject() - .endObject() - .endObject().string(); + .startObject() + .startObject(name) + .startObject("terms") + .field("field", "a") + .endObject() + .endObject() + .startObject(name) + .startObject("terms") + .field("field", "b") + .endObject() + .endObject() + .endObject().string(); try { XContentParser parser = XContentFactory.xContent(source).createParser(source); QueryParseContext parseContext = new QueryParseContext(queriesRegistry); @@ -329,21 +332,21 @@ public class AggregatorParsingTests extends ESTestCase { public void testMissingName() throws Exception { String source = JsonXContent.contentBuilder() - .startObject() - .startObject("by_date") - .startObject("date_histogram") - .field("field", "timestamp") - .field("interval", "month") - .endObject() - .startObject("aggs") - // the aggregation name is missing - //.startObject("tag_count") - .startObject("cardinality") - .field("field", "tag") - .endObject() - //.endObject() - .endObject() - .endObject().string(); + .startObject() + .startObject("by_date") + .startObject("date_histogram") + .field("field", "timestamp") + .field("interval", "month") + .endObject() + .startObject("aggs") + // the aggregation name is missing + //.startObject("tag_count") + .startObject("cardinality") + .field("field", "tag") + .endObject() + //.endObject() + .endObject() + .endObject().string(); try { XContentParser parser = XContentFactory.xContent(source).createParser(source); QueryParseContext parseContext = new QueryParseContext(queriesRegistry); @@ -359,21 +362,21 @@ public class AggregatorParsingTests extends ESTestCase { public void testMissingType() throws Exception { String source = JsonXContent.contentBuilder() - .startObject() - .startObject("by_date") - .startObject("date_histogram") - .field("field", "timestamp") - .field("interval", "month") - .endObject() - .startObject("aggs") - .startObject("tag_count") - // the aggregation type is missing - //.startObject("cardinality") - .field("field", "tag") - //.endObject() - .endObject() - .endObject() - .endObject().string(); + .startObject() + .startObject("by_date") + .startObject("date_histogram") + .field("field", "timestamp") + .field("interval", "month") + .endObject() + .startObject("aggs") + .startObject("tag_count") + // the aggregation type is missing + 
//.startObject("cardinality") + .field("field", "tag") + //.endObject() + .endObject() + .endObject() + .endObject().string(); try { XContentParser parser = XContentFactory.xContent(source).createParser(source); QueryParseContext parseContext = new QueryParseContext(queriesRegistry); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java index eeffbb73f8b..4a12072da44 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java @@ -20,10 +20,10 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Injector; @@ -60,7 +60,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolModule; import org.junit.AfterClass; @@ -73,6 +72,8 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.equalTo; public abstract class BaseAggregationTestCase> extends ESTestCase { @@ -83,8 +84,8 @@ public abstract class BaseAggregationTestCase> protected static final String BOOLEAN_FIELD_NAME = "mapped_boolean"; protected static final String DATE_FIELD_NAME = "mapped_date"; protected static final String OBJECT_FIELD_NAME = "mapped_object"; - protected static final String[] mappedFieldNames = new String[] { STRING_FIELD_NAME, INT_FIELD_NAME, - DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, OBJECT_FIELD_NAME }; + protected static final String[] mappedFieldNames = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, + DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, OBJECT_FIELD_NAME}; private static Injector injector; private static Index index; @@ -117,11 +118,12 @@ public abstract class BaseAggregationTestCase> .put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false) .build(); - namedWriteableRegistry = new NamedWriteableRegistry(); + namedWriteableRegistry = new NamedWriteableRegistry(); index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_"); Settings indexSettings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); - final TestClusterService clusterService = new TestClusterService(); - clusterService.setState(new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder() + final ThreadPool threadPool = new ThreadPool(settings); + final ClusterService clusterService = createClusterService(threadPool); + setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder() .put(new 
IndexMetaData.Builder(index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0)))); SettingsModule settingsModule = new SettingsModule(settings); settingsModule.registerSetting(InternalSettingsPlugin.VERSION_CREATED); @@ -158,7 +160,7 @@ public abstract class BaseAggregationTestCase<AB extends AggregatorBuilder<AB>> injector = new ModulesBuilder().add( new EnvironmentModule(new Environment(settings)), settingsModule, - new ThreadPoolModule(new ThreadPool(settings)), + new ThreadPoolModule(threadPool), scriptModule, new IndicesModule() { @@ -171,6 +173,7 @@ public abstract class BaseAggregationTestCase<AB extends AggregatorBuilder<AB>> protected void configureSearch() { // Skip me } + @Override protected void configureSuggesters() { // Skip me @@ -200,6 +203,7 @@ public abstract class BaseAggregationTestCase<AB extends AggregatorBuilder<AB>> @AfterClass public static void afterClass() throws Exception { + injector.getInstance(ClusterService.class).close(); terminate(injector.getInstance(ThreadPool.class)); injector = null; index = null; @@ -249,7 +253,7 @@ public abstract class BaseAggregationTestCase<AB extends AggregatorBuilder<AB>> testAgg.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) { AggregatorBuilder prototype = (AggregatorBuilder) namedWriteableRegistry.getPrototype(AggregatorBuilder.class, - testAgg.getWriteableName()); + testAgg.getWriteableName()); AggregatorBuilder deserializedQuery = prototype.readFrom(in); assertEquals(deserializedQuery, testAgg); assertEquals(deserializedQuery.hashCode(), testAgg.hashCode()); @@ -291,7 +295,7 @@ public abstract class BaseAggregationTestCase<AB extends AggregatorBuilder<AB>> agg.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) { AggregatorBuilder prototype = (AggregatorBuilder) namedWriteableRegistry.getPrototype(AggregatorBuilder.class, - agg.getWriteableName()); + agg.getWriteableName()); @SuppressWarnings("unchecked") AB secondAgg = (AB) prototype.readFrom(in); return secondAgg; @@ -309,7 +313,7 @@ public abstract class BaseAggregationTestCase<AB extends AggregatorBuilder<AB>> } } else { if (randomBoolean()) { - types = new String[] { MetaData.ALL }; + types = new String[]{MetaData.ALL}; } else { types = new String[0]; } @@ -320,13 +324,13 @@ public abstract class BaseAggregationTestCase<AB extends AggregatorBuilder<AB>> public String randomNumericField() { int randomInt = randomInt(3); switch (randomInt) { - case 0: - return DATE_FIELD_NAME; - case 1: - return DOUBLE_FIELD_NAME; - case 2: - default: - return INT_FIELD_NAME; + case 0: + return DATE_FIELD_NAME; + case 1: + return DOUBLE_FIELD_NAME; + case 2: + default: + return INT_FIELD_NAME; } } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/BasePipelineAggregationTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/BasePipelineAggregationTestCase.java index f180ab57162..cac2100ddde 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/BasePipelineAggregationTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/BasePipelineAggregationTestCase.java @@ -20,10 +20,10 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.inject.AbstractModule; import 
org.elasticsearch.common.inject.Injector; @@ -61,7 +61,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolModule; import org.junit.AfterClass; @@ -74,6 +73,8 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.equalTo; public abstract class BasePipelineAggregationTestCase<AF extends PipelineAggregatorBuilder<AF>> extends ESTestCase { @@ -84,8 +85,8 @@ public abstract class BasePipelineAggregationTestCase> nodePlugins() { return Arrays.asList( - ExtractFieldScriptPlugin.class, - FieldValueScriptPlugin.class); + DateScriptsMockPlugin.class); } @After @@ -466,10 +454,12 @@ public class DateHistogramIT extends ESIntegTestCase { } public void testSingleValuedFieldWithValueScript() throws Exception { + Map<String, Object> params = new HashMap<>(); + params.put("fieldname", "date"); SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") .field("date") - .script(new Script("", ScriptType.INLINE, FieldValueScriptEngine.NAME, null)) + .script(new Script(DateScriptMocks.PlusOneMonthScript.NAME, ScriptType.INLINE, "native", params)) .dateHistogramInterval(DateHistogramInterval.MONTH)).execute().actionGet(); assertSearchResponse(response); @@ -600,10 +590,12 @@ public class DateHistogramIT extends ESIntegTestCase { * doc 6: [ Apr 23, May 24] */ public void testMultiValuedFieldWithValueScript() throws Exception { + Map<String, Object> params = new HashMap<>(); + params.put("fieldname", "dates"); SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") .field("dates") - .script(new Script("", ScriptType.INLINE, FieldValueScriptEngine.NAME, null)) + .script(new Script(DateScriptMocks.PlusOneMonthScript.NAME, ScriptType.INLINE, "native", params)) .dateHistogramInterval(DateHistogramInterval.MONTH)).execute().actionGet(); assertSearchResponse(response); @@ -652,8 +644,11 @@ public class DateHistogramIT extends ESIntegTestCase { * Mar 23 */ public void testScriptSingleValue() throws Exception { + Map<String, Object> params = new HashMap<>(); + params.put("fieldname", "date"); SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").script(new Script("date", ScriptType.INLINE, ExtractFieldScriptEngine.NAME, null)).dateHistogramInterval(DateHistogramInterval.MONTH)) + .addAggregation(dateHistogram("histo").script(new Script(DateScriptMocks.ExtractFieldScript.NAME, + ScriptType.INLINE, "native", params)).dateHistogramInterval(DateHistogramInterval.MONTH)) .execute().actionGet(); assertSearchResponse(response); @@ -687,8 +682,11 @@ public class DateHistogramIT extends ESIntegTestCase { } public void testScriptMultiValued() throws Exception { + Map<String, Object> params = new HashMap<>(); + params.put("fieldname", "dates"); SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").script(new Script("dates", ScriptType.INLINE, ExtractFieldScriptEngine.NAME, null)).dateHistogramInterval(DateHistogramInterval.MONTH)) + .addAggregation(dateHistogram("histo").script(new Script(DateScriptMocks.ExtractFieldScript.NAME, + ScriptType.INLINE, "native", 
params)).dateHistogramInterval(DateHistogramInterval.MONTH)) .execute().actionGet(); assertSearchResponse(response); @@ -1148,256 +1146,4 @@ public class DateHistogramIT extends ESIntegTestCase { Histogram histo = response.getAggregations().get("histo"); assertThat(histo.getBuckets().size(), greaterThan(0)); } - - /** - * Mock plugin for the {@link ExtractFieldScriptEngine} - */ - public static class ExtractFieldScriptPlugin extends Plugin { - - @Override - public String name() { - return ExtractFieldScriptEngine.NAME; - } - - @Override - public String description() { - return "Mock script engine for " + DateHistogramIT.class; - } - - public void onModule(ScriptModule module) { - module.addScriptEngine(new ScriptEngineRegistry.ScriptEngineRegistration(ExtractFieldScriptEngine.class, ExtractFieldScriptEngine.TYPES)); - } - - } - - /** - * This mock script returns the field that is specified by name in the script body - */ - public static class ExtractFieldScriptEngine implements ScriptEngineService { - - public static final String NAME = "extract_field"; - - public static final List<String> TYPES = Collections.singletonList(NAME); - - @Override - public void close() throws IOException { - } - - @Override - public List<String> getTypes() { - return TYPES; - } - - @Override - public List<String> getExtensions() { - return TYPES; - } - - @Override - public boolean isSandboxed() { - return true; - } - - @Override - public Object compile(String script, Map<String, Object> params) { - return script; - } - - @Override - public ExecutableScript executable(CompiledScript compiledScript, Map<String, Object> params) { - throw new UnsupportedOperationException(); - } - @Override - public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, Map<String, Object> vars) { - return new SearchScript() { - - @Override - public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException { - - final LeafSearchLookup leafLookup = lookup.getLeafSearchLookup(context); - - return new LeafSearchScript() { - @Override - public void setNextVar(String name, Object value) { - } - - @Override - public Object run() { - String fieldName = (String) compiledScript.compiled(); - return leafLookup.doc().get(fieldName); - } - - @Override - public void setScorer(Scorer scorer) { - } - - @Override - public void setSource(Map<String, Object> source) { - } - - @Override - public void setDocument(int doc) { - if (leafLookup != null) { - leafLookup.setDocument(doc); - } - } - - @Override - public long runAsLong() { - throw new UnsupportedOperationException(); - } - - @Override - public float runAsFloat() { - throw new UnsupportedOperationException(); - } - - @Override - public double runAsDouble() { - throw new UnsupportedOperationException(); - } - }; - } - - @Override - public boolean needsScores() { - return false; - } - }; - } - - @Override - public void scriptRemoved(CompiledScript script) { - } - } - - /** - * Mock plugin for the {@link FieldValueScriptEngine} - */ - public static class FieldValueScriptPlugin extends Plugin { - - @Override - public String name() { - return FieldValueScriptEngine.NAME; - } - - @Override - public String description() { - return "Mock script engine for " + DateHistogramIT.class; - } - - public void onModule(ScriptModule module) { - module.addScriptEngine(new ScriptEngineRegistry.ScriptEngineRegistration(FieldValueScriptEngine.class, FieldValueScriptEngine.TYPES)); - } - - } - - /** - * This mock script returns the field value and adds one month to the returned date - */ - public static class FieldValueScriptEngine implements 
ScriptEngineService { - - public static final String NAME = "field_value"; - - public static final List<String> TYPES = Collections.singletonList(NAME); - - @Override - public void close() throws IOException { - } - - @Override - public List<String> getTypes() { - return TYPES; - } - - @Override - public List<String> getExtensions() { - return TYPES; - } - - @Override - public boolean isSandboxed() { - return true; - } - - @Override - public Object compile(String script, Map<String, Object> params) { - return script; - } - - @Override - public ExecutableScript executable(CompiledScript compiledScript, Map<String, Object> params) { - throw new UnsupportedOperationException(); - } - @Override - public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, Map<String, Object> vars) { - return new SearchScript() { - - private Map<String, Object> vars = new HashMap<>(2); - - @Override - public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException { - - final LeafSearchLookup leafLookup = lookup.getLeafSearchLookup(context); - - return new LeafSearchScript() { - - @Override - public Object unwrap(Object value) { - throw new UnsupportedOperationException(); - } - - @Override - public void setNextVar(String name, Object value) { - vars.put(name, value); - } - - @Override - public Object run() { - throw new UnsupportedOperationException(); - } - - @Override - public void setScorer(Scorer scorer) { - } - - @Override - public void setSource(Map<String, Object> source) { - } - - @Override - public void setDocument(int doc) { - if (leafLookup != null) { - leafLookup.setDocument(doc); - } - } - - @Override - public long runAsLong() { - return new DateTime((long) vars.get("_value"), DateTimeZone.UTC).plusMonths(1).getMillis(); - } - - @Override - public float runAsFloat() { - throw new UnsupportedOperationException(); - } - - @Override - public double runAsDouble() { - return new DateTime(new Double((double) vars.get("_value")).longValue(), DateTimeZone.UTC).plusMonths(1).getMillis(); - } - }; - } - - @Override - public boolean needsScores() { - return false; - } - }; - } - - @Override - public void scriptRemoved(CompiledScript script) { - } - } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java index cc96555c372..2200e0e30ca 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java @@ -65,13 +65,6 @@ public class DateHistogramOffsetIT extends ESIntegTestCase { return Collections.singleton(AssertingLocalTransport.TestPlugin.class); } - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(AssertingLocalTransport.ASSERTING_TRANSPORT_MIN_VERSION_KEY.getKey(), Version.V_1_4_0_Beta1).build(); - } - @Before public void beforeEachTest() throws IOException { prepareCreate("idx2").addMapping("type", "date", "type=date").execute().actionGet(); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/DateRangeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java similarity index 92% rename from modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/DateRangeTests.java rename to core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index 44f7a93ade1..b1dc61a9b9e 100644 --- 
a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/DateRangeTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -16,13 +16,14 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.messy.tests; +package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.groovy.GroovyPlugin; +import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.search.aggregations.bucket.DateScriptMocks.DateScriptsMockPlugin; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.range.Range; import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket; @@ -36,8 +37,9 @@ import org.joda.time.DateTimeZone; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -55,12 +57,7 @@ import static org.hamcrest.core.IsNull.nullValue; * */ @ESIntegTestCase.SuiteScopeTestCase -public class DateRangeTests extends ESIntegTestCase { - - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - return Collections.singleton(GroovyPlugin.class); - } +public class DateRangeIT extends ESIntegTestCase { private static IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception { return client().prepareIndex("idx", "type").setSource(jsonBuilder() @@ -72,7 +69,11 @@ public class DateRangeTests extends ESIntegTestCase { } private static DateTime date(int month, int day) { - return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC); + return date(month, day, DateTimeZone.UTC); + } + + private static DateTime date(int month, int day, DateTimeZone timezone) { + return new DateTime(2012, month, day, 0, 0, timezone); } private static int numDocs; @@ -107,18 +108,26 @@ public class DateRangeTests extends ESIntegTestCase { ensureSearchable(); } + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList( + DateScriptsMockPlugin.class); + } + public void testDateMath() throws Exception { + Map<String, Object> params = new HashMap<>(); + params.put("fieldname", "date"); DateRangeAggregatorBuilder rangeBuilder = dateRange("range"); if (randomBoolean()) { rangeBuilder.field("date"); } else { - rangeBuilder.script(new Script("doc['date'].value")); + rangeBuilder.script(new Script(DateScriptMocks.ExtractFieldScript.NAME, ScriptType.INLINE, "native", params)); } SearchResponse response = client() .prepareSearch("idx") .addAggregation( rangeBuilder.addUnboundedTo("a long time ago", "now-50y").addRange("recently", "now-50y", "now-1y") - .addUnboundedFrom("last year", "now-1y")).execute().actionGet(); + .addUnboundedFrom("last year", "now-1y").timeZone(DateTimeZone.forID("EST"))).execute().actionGet(); assertSearchResponse(response); @@ -286,17 +295,25 @@ public class DateRangeTests extends ESIntegTestCase { } public void testSingleValueFieldWithDateMath() throws Exception { + String[] ids = DateTimeZone.getAvailableIDs().toArray(new String[DateTimeZone.getAvailableIDs().size()]); + DateTimeZone timezone = 
DateTimeZone.forID(randomFrom(ids)); + int timeZoneOffset = timezone.getOffset(date(2, 15)); + // if time zone is UTC (or equivalent), time zone suffix is "Z", else something like "+03:00", which we get with the "ZZ" format + String feb15Suffix = timeZoneOffset == 0 ? "Z" : date(2,15, timezone).toString("ZZ"); + String mar15Suffix = timeZoneOffset == 0 ? "Z" : date(3,15, timezone).toString("ZZ"); + long expectedFirstBucketCount = timeZoneOffset < 0 ? 3L : 2L; + SearchResponse response = client().prepareSearch("idx") .addAggregation(dateRange("range") .field("date") .addUnboundedTo("2012-02-15") .addRange("2012-02-15", "2012-02-15||+1M") - .addUnboundedFrom("2012-02-15||+1M")) + .addUnboundedFrom("2012-02-15||+1M") + .timeZone(timezone)) .execute().actionGet(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -305,30 +322,31 @@ public class DateRangeTests extends ESIntegTestCase { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); + assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000" + feb15Suffix)); assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15, timezone).toDateTime(DateTimeZone.UTC))); assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getDocCount(), equalTo(2L)); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); + assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount)); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix + + "-2012-03-15T00:00:00.000" + mar15Suffix)); + assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15, timezone).toDateTime(DateTimeZone.UTC))); + assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15, timezone).toDateTime(DateTimeZone.UTC))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); assertThat(bucket.getDocCount(), equalTo(2L)); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix + "-*")); + assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15, timezone).toDateTime(DateTimeZone.UTC))); assertThat(((DateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); assertThat(bucket.getToAsString(), nullValue()); - assertThat(bucket.getDocCount(), 
equalTo(numDocs - 4L)); + assertThat(bucket.getDocCount(), equalTo(numDocs - 2L - expectedFirstBucketCount)); } public void testSingleValueFieldWithCustomKey() throws Exception { @@ -520,10 +538,12 @@ public class DateRangeTests extends ESIntegTestCase { public void testMultiValuedFieldWithValueScript() throws Exception { + Map params = new HashMap<>(); + params.put("fieldname", "dates"); SearchResponse response = client().prepareSearch("idx") .addAggregation(dateRange("range") .field("dates") - .script(new Script("new DateTime(_value.longValue(), DateTimeZone.UTC).plusMonths(1).getMillis()")) + .script(new Script(DateScriptMocks.PlusOneMonthScript.NAME, ScriptType.INLINE, "native", params)) .addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15))).execute() .actionGet(); @@ -575,9 +595,11 @@ public class DateRangeTests extends ESIntegTestCase { */ public void testScriptSingleValue() throws Exception { + Map params = new HashMap<>(); + params.put("fieldname", "date"); SearchResponse response = client().prepareSearch("idx") .addAggregation(dateRange("range") - .script(new Script("doc['date'].value")) + .script(new Script(DateScriptMocks.ExtractFieldScript.NAME, ScriptType.INLINE, "native", params)) .addUnboundedTo(date(2, 15)) .addRange(date(2, 15), date(3, 15)) .addUnboundedFrom(date(3, 15))) @@ -634,11 +656,14 @@ public class DateRangeTests extends ESIntegTestCase { */ public void testScriptMultiValued() throws Exception { + Map params = new HashMap<>(); + params.put("fieldname", "dates"); SearchResponse response = client() .prepareSearch("idx") .addAggregation( - dateRange("range").script(new Script("doc['dates'].values")).addUnboundedTo(date(2, 15)) - .addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15))).execute().actionGet(); + dateRange("range").script(new Script(DateScriptMocks.ExtractFieldScript.NAME, ScriptType.INLINE, "native", params)) + .addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)) + .addUnboundedFrom(date(3, 15))).execute().actionGet(); assertSearchResponse(response); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java index 94156fc3a5d..71b61c0e6e6 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java @@ -22,9 +22,12 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range; import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeAggregatorBuilder; +import org.joda.time.DateTimeZone; public class DateRangeTests extends BaseAggregationTestCase { + private final static String[] timeZoneIds = DateTimeZone.getAvailableIDs().toArray(new String[DateTimeZone.getAvailableIDs().size()]); + @Override protected DateRangeAggregatorBuilder createTestAggregatorBuilder() { int numRanges = randomIntBetween(1, 10); @@ -56,6 +59,9 @@ public class DateRangeTests extends BaseAggregationTestCase params) { + return new ExtractFieldScript((String) params.get("fieldname")); + } + @Override + public boolean needsScores() { + return false; + } + } + + public static class ExtractFieldScript extends AbstractSearchScript { + + public static final String NAME = "extract_field"; + private String fieldname; + + public 
ExtractFieldScript(String fieldname) { + this.fieldname = fieldname; + } + + @Override + public Object run() { + return doc().get(fieldname); + } + } + + public static class PlusOneMonthScriptFactory implements NativeScriptFactory { + + @Override + public ExecutableScript newScript(Map<String, Object> params) { + return new PlusOneMonthScript((String) params.get("fieldname")); + } + + @Override + public boolean needsScores() { + return false; + } + } + + /** + * This mock script takes a date field value and adds one month to the returned date + */ + public static class PlusOneMonthScript extends AbstractSearchScript { + + public static final String NAME = "date_plus_1_month"; + private String fieldname; + + private Map<String, Object> vars = new HashMap<>(); + + public PlusOneMonthScript(String fieldname) { + this.fieldname = fieldname; + } + + @Override + public void setNextVar(String name, Object value) { + vars.put(name, value); + } + + @Override + public long runAsLong() { + return new DateTime((long) vars.get("_value"), DateTimeZone.UTC).plusMonths(1).getMillis(); + } + + @Override + public double runAsDouble() { + return new DateTime(new Double((double) vars.get("_value")).longValue(), DateTimeZone.UTC).plusMonths(1).getMillis(); + } + + @Override + public Object run() { + throw new UnsupportedOperationException(); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 8312f4aca04..6d2d11e2799 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -67,7 +67,7 @@ public class GeoDistanceIT extends ESIntegTestCase { return pluginList(InternalSettingsPlugin.class); // uses index.version.created } - private Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + private Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); private IndexRequestBuilder indexCity(String idx, String name, String...
latLons) throws Exception { XContentBuilder source = jsonBuilder().startObject().field("city", name); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index 22413a7b319..5aa7ba44466 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -63,7 +63,7 @@ public class GeoHashGridIT extends ESIntegTestCase { return pluginList(InternalSettingsPlugin.class); // uses index.version.created } - private Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT); + private Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); static ObjectIntMap expectedDocCountsForGeoHash = null; static ObjectIntMap multiValuedExpectedDocCountsForGeoHash = null; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index 044ca4f8045..11d838d43c4 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -84,7 +84,7 @@ public class NestedIT extends ESIntegTestCase { numParents = randomIntBetween(3, 10); numChildren = new int[numParents]; aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - logger.info("AGG COLLECTION MODE: " + aggCollectionMode); + logger.info("AGG COLLECTION MODE: {}", aggCollectionMode); int totalChildren = 0; for (int i = 0; i < numParents; ++i) { if (i == numParents - 1 && totalChildren == 0) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 8304922aa62..14f2912d19f 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -433,7 +433,7 @@ public class TopHitsIT extends ESIntegTestCase { assertThat(hits.totalHits(), equalTo(controlHits.totalHits())); assertThat(hits.getHits().length, equalTo(controlHits.getHits().length)); for (int i = 0; i < hits.getHits().length; i++) { - logger.info(i + ": top_hits: [" + hits.getAt(i).id() + "][" + hits.getAt(i).sortValues()[0] + "] control: [" + controlHits.getAt(i).id() + "][" + controlHits.getAt(i).sortValues()[0] + "]"); + logger.info("{}: top_hits: [{}][{}] control: [{}][{}]", i, hits.getAt(i).id(), hits.getAt(i).sortValues()[0], controlHits.getAt(i).id(), controlHits.getAt(i).sortValues()[0]); assertThat(hits.getAt(i).id(), equalTo(controlHits.getAt(i).id())); assertThat(hits.getAt(i).sortValues()[0], equalTo(controlHits.getAt(i).sortValues()[0])); } @@ -609,7 +609,7 @@ public class TopHitsIT extends ESIntegTestCase { public void testTrackScores() throws Exception { boolean[] trackScores = new boolean[]{true, false}; for (boolean trackScore : trackScores) { - logger.info("Track score=" + trackScore); + logger.info("Track score={}", trackScore); SearchResponse response = client().prepareSearch("idx").setTypes("field-collapsing") .setQuery(matchQuery("text", "term rare")) .addAggregation(terms("terms") diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java 
b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java index cccac925a1f..05ea6148a56 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregatorBuilder; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.HighlightBuilderTests; +import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; @@ -132,7 +133,7 @@ public class TopHitsTests extends BaseAggregationTestCase EXCEPTION_TOP_LEVEL_RATIO_SETTING = Setting.doubleSetting(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d, 0.0d, false, Setting.Scope.INDEX); - public static final Setting EXCEPTION_LOW_LEVEL_RATIO_SETTING = Setting.doubleSetting(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d, 0.0d, false, Setting.Scope.INDEX); + public static final Setting EXCEPTION_TOP_LEVEL_RATIO_SETTING = + Setting.doubleSetting(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d, 0.0d, Property.IndexScope); + public static final Setting EXCEPTION_LOW_LEVEL_RATIO_SETTING = + Setting.doubleSetting(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d, 0.0d, Property.IndexScope); @Override public String name() { return "random-exception-reader-wrapper"; diff --git a/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java b/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java index 3cd1d269275..bd1d6ed9795 100644 --- a/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java +++ b/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java @@ -90,7 +90,7 @@ public class TransportSearchFailuresIT extends ESIntegTestCase { .cluster() .health(clusterHealthRequest("test").waitForYellowStatus().waitForRelocatingShards(0) .waitForActiveShards(test.totalNumShards)).actionGet(); - logger.info("Done Cluster Health, status " + clusterHealth.getStatus()); + logger.info("Done Cluster Health, status {}", clusterHealth.getStatus()); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), anyOf(equalTo(ClusterHealthStatus.YELLOW), equalTo(ClusterHealthStatus.GREEN))); assertThat(clusterHealth.getActiveShards(), equalTo(test.totalNumShards)); diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 6b29cabe3f6..df992a06aae 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -20,10 +20,10 @@ package org.elasticsearch.search.builder; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -49,15 +49,9 @@ import org.elasticsearch.env.Environment; import 
org.elasticsearch.env.EnvironmentModule; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.AbstractQueryTestCase; -import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.EmptyQueryBuilder; -import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.index.query.WrapperQueryBuilder; -import org.elasticsearch.index.query.functionscore.ScoreFunctionParser; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -80,15 +74,15 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.HighlightBuilderTests; import org.elasticsearch.search.rescore.QueryRescoreBuilderTests; import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.search.suggest.SuggestBuilder; -import org.elasticsearch.search.suggest.SuggestBuilders; +import org.elasticsearch.search.suggest.SuggestBuilderTests; +import org.elasticsearch.search.suggest.Suggesters; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolModule; import org.junit.AfterClass; @@ -102,6 +96,8 @@ import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.equalTo; public class SearchSourceBuilderTests extends ESTestCase { @@ -115,6 +111,8 @@ public class SearchSourceBuilderTests extends ESTestCase { private static AggregatorParsers aggParsers; + private static Suggesters suggesters; + private static String[] currentTypes; private static ParseFieldMatcher parseFieldMatcher; @@ -133,8 +131,9 @@ public class SearchSourceBuilderTests extends ESTestCase { namedWriteableRegistry = new NamedWriteableRegistry(); index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_"); Settings indexSettings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); - final TestClusterService clusterService = new TestClusterService(); - clusterService.setState(new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder() + final ThreadPool threadPool = new ThreadPool(settings); + final ClusterService clusterService = createClusterService(threadPool); + setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder() .put(new IndexMetaData.Builder(index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0)))); SettingsModule settingsModule = new SettingsModule(settings); settingsModule.registerSetting(InternalSettingsPlugin.VERSION_CREATED); @@ -170,9 +169,8 @@ public class 
SearchSourceBuilderTests extends ESTestCase { scriptModule.prepareSettings(settingsModule); injector = new ModulesBuilder().add( new EnvironmentModule(new Environment(settings)), settingsModule, - new ThreadPoolModule(new ThreadPool(settings)), + new ThreadPoolModule(threadPool), scriptModule, new IndicesModule() { - @Override protected void configure() { bindMapperExtension(); @@ -182,13 +180,8 @@ public class SearchSourceBuilderTests extends ESTestCase { protected void configureSearch() { // Skip me } - @Override - protected void configureSuggesters() { - // Skip me - } }, new IndexSettingsModule(index, settings), - new AbstractModule() { @Override protected void configure() { @@ -199,6 +192,7 @@ public class SearchSourceBuilderTests extends ESTestCase { } ).createInjector(); aggParsers = injector.getInstance(AggregatorParsers.class); + suggesters = injector.getInstance(Suggesters.class); // create some random type with some default field, those types will // stick around for all of the subclasses currentTypes = new String[randomIntBetween(0, 5)]; @@ -212,6 +206,7 @@ public class SearchSourceBuilderTests extends ESTestCase { @AfterClass public static void afterClass() throws Exception { + injector.getInstance(ClusterService.class).close(); terminate(injector.getInstance(ThreadPool.class)); injector = null; index = null; @@ -285,26 +280,26 @@ public class SearchSourceBuilderTests extends ESTestCase { excludes[i] = randomAsciiOfLengthBetween(5, 20); } switch (branch) { - case 0: - fetchSourceContext = new FetchSourceContext(randomBoolean()); - break; - case 1: - fetchSourceContext = new FetchSourceContext(includes, excludes); - break; - case 2: - fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)); - break; - case 3: - fetchSourceContext = new FetchSourceContext(true, includes, excludes); - break; - case 4: - fetchSourceContext = new FetchSourceContext(includes); - break; - case 5: - fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20)); - break; - default: - throw new IllegalStateException(); + case 0: + fetchSourceContext = new FetchSourceContext(randomBoolean()); + break; + case 1: + fetchSourceContext = new FetchSourceContext(includes, excludes); + break; + case 2: + fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20), randomAsciiOfLengthBetween(5, 20)); + break; + case 3: + fetchSourceContext = new FetchSourceContext(true, includes, excludes); + break; + case 4: + fetchSourceContext = new FetchSourceContext(includes); + break; + case 5: + fetchSourceContext = new FetchSourceContext(randomAsciiOfLengthBetween(5, 20)); + break; + default: + throw new IllegalStateException(); } builder.fetchSource(fetchSourceContext); } @@ -339,25 +334,26 @@ public class SearchSourceBuilderTests extends ESTestCase { for (int i = 0; i < numSorts; i++) { int branch = randomInt(5); switch (branch) { - case 0: - builder.sort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values()))); - break; - case 1: - builder.sort(SortBuilders.geoDistanceSort(randomAsciiOfLengthBetween(5, 20), - AbstractQueryTestCase.randomGeohash(1, 12)).order(randomFrom(SortOrder.values()))); - break; - case 2: - builder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values()))); - break; - case 3: - builder.sort(SortBuilders.scriptSort(new Script("foo"), "number").order(randomFrom(SortOrder.values()))); - break; - case 4: - builder.sort(randomAsciiOfLengthBetween(5, 20)); - 
break; - case 5: - builder.sort(randomAsciiOfLengthBetween(5, 20), randomFrom(SortOrder.values())); - break; + case 0: + builder.sort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values()))); + break; + case 1: + builder.sort(SortBuilders.geoDistanceSort(randomAsciiOfLengthBetween(5, 20), + AbstractQueryTestCase.randomGeohash(1, 12)).order(randomFrom(SortOrder.values()))); + break; + case 2: + builder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values()))); + break; + case 3: + builder.sort(SortBuilders.scriptSort(new Script("foo"), + ScriptSortType.NUMBER).order(randomFrom(SortOrder.values()))); + break; + case 4: + builder.sort(randomAsciiOfLengthBetween(5, 20)); + break; + case 5: + builder.sort(randomAsciiOfLengthBetween(5, 20), randomFrom(SortOrder.values())); + break; } } } @@ -415,9 +411,7 @@ public class SearchSourceBuilderTests extends ESTestCase { builder.highlighter(HighlightBuilderTests.randomHighlighterBuilder()); } if (randomBoolean()) { - // NORELEASE need a random suggest builder method - builder.suggest(new SuggestBuilder().setText(randomAsciiOfLengthBetween(1, 5)).addSuggestion( - SuggestBuilders.termSuggestion(randomAsciiOfLengthBetween(1, 5)))); + builder.suggest(SuggestBuilderTests.randomSuggestBuilder()); } if (randomBoolean()) { // NORELEASE need a random inner hits builder method @@ -465,7 +459,7 @@ public class SearchSourceBuilderTests extends ESTestCase { if (randomBoolean()) { parser.nextToken(); // sometimes we move it on the START_OBJECT to test the embedded case } - SearchSourceBuilder newBuilder = SearchSourceBuilder.parseSearchSource(parser, parseContext, aggParsers); + SearchSourceBuilder newBuilder = SearchSourceBuilder.parseSearchSource(parser, parseContext, aggParsers, suggesters); assertNull(parser.nextToken()); assertEquals(testBuilder, newBuilder); assertEquals(testBuilder.hashCode(), newBuilder.hashCode()); @@ -503,14 +497,17 @@ public class SearchSourceBuilderTests extends ESTestCase { assertTrue("source builder is not equal to self", secondBuilder.equals(secondBuilder)); assertTrue("source builder is not equal to its copy", firstBuilder.equals(secondBuilder)); assertTrue("source builder is not symmetric", secondBuilder.equals(firstBuilder)); - assertThat("source builder copy's hashcode is different from original hashcode", secondBuilder.hashCode(), equalTo(firstBuilder.hashCode())); + assertThat("source builder copy's hashcode is different from original hashcode", + secondBuilder.hashCode(), equalTo(firstBuilder.hashCode())); SearchSourceBuilder thirdBuilder = copyBuilder(secondBuilder); assertTrue("source builder is not equal to self", thirdBuilder.equals(thirdBuilder)); assertTrue("source builder is not equal to its copy", secondBuilder.equals(thirdBuilder)); - assertThat("source builder copy's hashcode is different from original hashcode", secondBuilder.hashCode(), equalTo(thirdBuilder.hashCode())); + assertThat("source builder copy's hashcode is different from original hashcode", + secondBuilder.hashCode(), equalTo(thirdBuilder.hashCode())); assertTrue("equals is not transitive", firstBuilder.equals(thirdBuilder)); - assertThat("source builder copy's hashcode is different from original hashcode", firstBuilder.hashCode(), equalTo(thirdBuilder.hashCode())); + assertThat("source builder copy's hashcode is different from original hashcode", + firstBuilder.hashCode(), equalTo(thirdBuilder.hashCode())); assertTrue("equals is not symmetric", thirdBuilder.equals(secondBuilder)); assertTrue("equals is 
not symmetric", thirdBuilder.equals(firstBuilder)); } @@ -530,16 +527,16 @@ public class SearchSourceBuilderTests extends ESTestCase { String restContent = " { \"_source\": { \"includes\": \"include\", \"excludes\": \"*.field2\"}}"; try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.parseSearchSource(parser, createParseContext(parser), - aggParsers); - assertArrayEquals(new String[]{"*.field2" }, searchSourceBuilder.fetchSource().excludes()); - assertArrayEquals(new String[]{"include" }, searchSourceBuilder.fetchSource().includes()); + aggParsers, suggesters); + assertArrayEquals(new String[]{"*.field2"}, searchSourceBuilder.fetchSource().excludes()); + assertArrayEquals(new String[]{"include"}, searchSourceBuilder.fetchSource().includes()); } } { String restContent = " { \"_source\": false}"; try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.parseSearchSource(parser, createParseContext(parser), - aggParsers); + aggParsers, suggesters); assertArrayEquals(new String[]{}, searchSourceBuilder.fetchSource().excludes()); assertArrayEquals(new String[]{}, searchSourceBuilder.fetchSource().includes()); assertFalse(searchSourceBuilder.fetchSource().fetchSource()); @@ -552,9 +549,9 @@ public class SearchSourceBuilderTests extends ESTestCase { String restContent = " { \"sort\": \"foo\"}"; try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.parseSearchSource(parser, createParseContext(parser), - aggParsers); + aggParsers, suggesters); assertEquals(1, searchSourceBuilder.sorts().size()); - assertEquals("{\"foo\":{}}", searchSourceBuilder.sorts().get(0).toUtf8()); + assertEquals("{\"foo\":{\"order\":\"asc\"}}", searchSourceBuilder.sorts().get(0).toUtf8()); } } @@ -568,7 +565,7 @@ public class SearchSourceBuilderTests extends ESTestCase { " ]}"; try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.parseSearchSource(parser, createParseContext(parser), - aggParsers); + aggParsers, suggesters); assertEquals(5, searchSourceBuilder.sorts().size()); assertEquals("{\"post_date\":{\"order\":\"asc\"}}", searchSourceBuilder.sorts().get(0).toUtf8()); assertEquals("\"user\"", searchSourceBuilder.sorts().get(1).toUtf8()); diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java index 9440a3e91c1..87d5a00d2a2 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java @@ -51,7 +51,6 @@ import org.hamcrest.Matchers; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -94,13 +93,14 @@ import static org.hamcrest.Matchers.notNullValue; */ @ClusterScope(scope = Scope.SUITE) public class ChildQuerySearchIT extends ESIntegTestCase { + @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) - // aggressive filter caching so that we can assert on the filter cache size - 
.put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), IndexModule.INDEX_QUERY_CACHE) - .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true) - .build(); + public Settings indexSettings() { + return Settings.settingsBuilder().put(super.indexSettings()) + // aggressive filter caching so that we can assert on the filter cache size + .put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), IndexModule.INDEX_QUERY_CACHE) + .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true) + .build(); } public void testSelfReferentialIsForbidden() { @@ -576,20 +576,20 @@ public class ChildQuerySearchIT extends ESIntegTestCase { .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreMode(ScoreMode.Max)) .get(); assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("Score based on join value p1")); + assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), containsString("join value p1")); searchResponse = client().prepareSearch("test") .setExplain(true) .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).score(true)) .get(); assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("Score based on join value p1")); + assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), containsString("join value p1")); ExplainResponse explainResponse = client().prepareExplain("test", "parent", parentId) .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreMode(ScoreMode.Max)) .get(); assertThat(explainResponse.isExists(), equalTo(true)); - assertThat(explainResponse.getExplanation().getDetails()[0].getDescription(), equalTo("Score based on join value p1")); + assertThat(explainResponse.getExplanation().getDetails()[0].getDescription(), containsString("join value p1")); } List createDocBuilders() { @@ -1561,7 +1561,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { SearchResponse response; // Score mode = NONE - response = minMaxQuery(ScoreMode.None, 0, 0); + response = minMaxQuery(ScoreMode.None, 0, null); assertThat(response.getHits().totalHits(), equalTo(3L)); assertThat(response.getHits().hits()[0].id(), equalTo("2")); @@ -1571,7 +1571,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(response.getHits().hits()[2].id(), equalTo("4")); assertThat(response.getHits().hits()[2].score(), equalTo(1f)); - response = minMaxQuery(ScoreMode.None, 1, 0); + response = minMaxQuery(ScoreMode.None, 1, null); assertThat(response.getHits().totalHits(), equalTo(3L)); assertThat(response.getHits().hits()[0].id(), equalTo("2")); @@ -1581,7 +1581,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(response.getHits().hits()[2].id(), equalTo("4")); assertThat(response.getHits().hits()[2].score(), equalTo(1f)); - response = minMaxQuery(ScoreMode.None, 2, 0); + response = minMaxQuery(ScoreMode.None, 2, null); assertThat(response.getHits().totalHits(), equalTo(2L)); assertThat(response.getHits().hits()[0].id(), equalTo("3")); @@ -1589,13 +1589,13 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(response.getHits().hits()[1].id(), equalTo("4")); assertThat(response.getHits().hits()[1].score(), equalTo(1f)); - response = minMaxQuery(ScoreMode.None, 3, 0); + response = minMaxQuery(ScoreMode.None, 3, null); assertThat(response.getHits().totalHits(), equalTo(1L)); assertThat(response.getHits().hits()[0].id(), equalTo("4")); 
assertThat(response.getHits().hits()[0].score(), equalTo(1f)); - response = minMaxQuery(ScoreMode.None, 4, 0); + response = minMaxQuery(ScoreMode.None, 4, null); assertThat(response.getHits().totalHits(), equalTo(0L)); @@ -1641,7 +1641,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { } // Score mode = SUM - response = minMaxQuery(ScoreMode.Total, 0, 0); + response = minMaxQuery(ScoreMode.Total, 0, null); assertThat(response.getHits().totalHits(), equalTo(3L)); assertThat(response.getHits().hits()[0].id(), equalTo("4")); @@ -1651,7 +1651,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(response.getHits().hits()[2].id(), equalTo("2")); assertThat(response.getHits().hits()[2].score(), equalTo(1f)); - response = minMaxQuery(ScoreMode.Total, 1, 0); + response = minMaxQuery(ScoreMode.Total, 1, null); assertThat(response.getHits().totalHits(), equalTo(3L)); assertThat(response.getHits().hits()[0].id(), equalTo("4")); @@ -1661,7 +1661,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(response.getHits().hits()[2].id(), equalTo("2")); assertThat(response.getHits().hits()[2].score(), equalTo(1f)); - response = minMaxQuery(ScoreMode.Total, 2, 0); + response = minMaxQuery(ScoreMode.Total, 2, null); assertThat(response.getHits().totalHits(), equalTo(2L)); assertThat(response.getHits().hits()[0].id(), equalTo("4")); @@ -1669,13 +1669,13 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(response.getHits().hits()[1].id(), equalTo("3")); assertThat(response.getHits().hits()[1].score(), equalTo(3f)); - response = minMaxQuery(ScoreMode.Total, 3, 0); + response = minMaxQuery(ScoreMode.Total, 3, null); assertThat(response.getHits().totalHits(), equalTo(1L)); assertThat(response.getHits().hits()[0].id(), equalTo("4")); assertThat(response.getHits().hits()[0].score(), equalTo(6f)); - response = minMaxQuery(ScoreMode.Total, 4, 0); + response = minMaxQuery(ScoreMode.Total, 4, null); assertThat(response.getHits().totalHits(), equalTo(0L)); @@ -1721,7 +1721,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { } // Score mode = MAX - response = minMaxQuery(ScoreMode.Max, 0, 0); + response = minMaxQuery(ScoreMode.Max, 0, null); assertThat(response.getHits().totalHits(), equalTo(3L)); assertThat(response.getHits().hits()[0].id(), equalTo("4")); @@ -1731,7 +1731,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(response.getHits().hits()[2].id(), equalTo("2")); assertThat(response.getHits().hits()[2].score(), equalTo(1f)); - response = minMaxQuery(ScoreMode.Max, 1, 0); + response = minMaxQuery(ScoreMode.Max, 1, null); assertThat(response.getHits().totalHits(), equalTo(3L)); assertThat(response.getHits().hits()[0].id(), equalTo("4")); @@ -1741,7 +1741,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(response.getHits().hits()[2].id(), equalTo("2")); assertThat(response.getHits().hits()[2].score(), equalTo(1f)); - response = minMaxQuery(ScoreMode.Max, 2, 0); + response = minMaxQuery(ScoreMode.Max, 2, null); assertThat(response.getHits().totalHits(), equalTo(2L)); assertThat(response.getHits().hits()[0].id(), equalTo("4")); @@ -1749,13 +1749,13 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(response.getHits().hits()[1].id(), equalTo("3")); assertThat(response.getHits().hits()[1].score(), equalTo(2f)); - response = minMaxQuery(ScoreMode.Max, 3, 0); + response = minMaxQuery(ScoreMode.Max, 3, null); assertThat(response.getHits().totalHits(), equalTo(1L)); 
assertThat(response.getHits().hits()[0].id(), equalTo("4")); assertThat(response.getHits().hits()[0].score(), equalTo(3f)); - response = minMaxQuery(ScoreMode.Max, 4, 0); + response = minMaxQuery(ScoreMode.Max, 4, null); assertThat(response.getHits().totalHits(), equalTo(0L)); @@ -1801,7 +1801,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { } // Score mode = AVG - response = minMaxQuery(ScoreMode.Avg, 0, 0); + response = minMaxQuery(ScoreMode.Avg, 0, null); assertThat(response.getHits().totalHits(), equalTo(3L)); assertThat(response.getHits().hits()[0].id(), equalTo("4")); @@ -1811,7 +1811,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(response.getHits().hits()[2].id(), equalTo("2")); assertThat(response.getHits().hits()[2].score(), equalTo(1f)); - response = minMaxQuery(ScoreMode.Avg, 1, 0); + response = minMaxQuery(ScoreMode.Avg, 1, null); assertThat(response.getHits().totalHits(), equalTo(3L)); assertThat(response.getHits().hits()[0].id(), equalTo("4")); @@ -1821,7 +1821,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(response.getHits().hits()[2].id(), equalTo("2")); assertThat(response.getHits().hits()[2].score(), equalTo(1f)); - response = minMaxQuery(ScoreMode.Avg, 2, 0); + response = minMaxQuery(ScoreMode.Avg, 2, null); assertThat(response.getHits().totalHits(), equalTo(2L)); assertThat(response.getHits().hits()[0].id(), equalTo("4")); @@ -1829,13 +1829,13 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(response.getHits().hits()[1].id(), equalTo("3")); assertThat(response.getHits().hits()[1].score(), equalTo(1.5f)); - response = minMaxQuery(ScoreMode.Avg, 3, 0); + response = minMaxQuery(ScoreMode.Avg, 3, null); assertThat(response.getHits().totalHits(), equalTo(1L)); assertThat(response.getHits().hits()[0].id(), equalTo("4")); assertThat(response.getHits().hits()[0].score(), equalTo(2f)); - response = minMaxQuery(ScoreMode.Avg, 4, 0); + response = minMaxQuery(ScoreMode.Avg, 4, null); assertThat(response.getHits().totalHits(), equalTo(0L)); diff --git a/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java index 0c7c069ec34..8afbdca8c2e 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java +++ b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DocumentMapper; @@ -143,6 +144,7 @@ public class ParentFieldLoadingIT extends ESIntegTestCase { .setUpdateAllTypes(true) .get(); assertAcked(putMappingResponse); + Index test = resolveIndex("test"); assertBusy(new Runnable() { @Override public void run() { @@ -152,7 +154,7 @@ public class ParentFieldLoadingIT extends ESIntegTestCase { boolean verified = false; IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName); - IndexService indexService = indicesService.indexService("test"); + IndexService indexService = indicesService.indexService(test); if (indexService != null) { MapperService mapperService = indexService.mapperService(); DocumentMapper 
documentMapper = mapperService.documentMapper("child"); diff --git a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java index 3159217f174..071f7375383 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsResponse; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.termvectors.TermVectorsService; @@ -173,7 +174,7 @@ public class FetchSubPhasePluginIT extends ESIntegTestCase { } hitField.values().add(tv); } catch (IOException e) { - e.printStackTrace(); + ESLoggerFactory.getLogger(FetchSubPhasePluginIT.class.getName()).info("Swallowed exception", e); } } } diff --git a/core/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTests.java b/core/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTests.java index 60810ee4df6..7587866b144 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTests.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.fetch.innerhits; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -import org.apache.lucene.document.IntField; +import org.apache.lucene.document.LegacyIntField; import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; @@ -66,7 +66,7 @@ public class NestedChildrenFilterTests extends ESTestCase { Document parenDoc = new Document(); parenDoc.add(new StringField("type", "parent", Field.Store.NO)); - parenDoc.add(new IntField("num_child_docs", numChildDocs, Field.Store.YES)); + parenDoc.add(new LegacyIntField("num_child_docs", numChildDocs, Field.Store.YES)); docs.add(parenDoc); writer.addDocuments(docs); } diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index e96b4d69b00..175adc27892 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -31,8 +31,10 @@ import org.elasticsearch.common.lucene.search.function.CombineFunction; import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; +import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.SearchHits; @@ -50,6 +52,7 @@ import java.util.Locale; import static 
org.elasticsearch.client.Requests.indexRequest; import static org.elasticsearch.client.Requests.searchRequest; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.boostingQuery; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; @@ -74,6 +77,8 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { return pluginList(InternalSettingsPlugin.class); // uses index.version.created } + private final QueryBuilder baseQuery = constantScoreQuery(termQuery("test", "value")); + public void testDistanceScoreGeoLinGaussExp() throws Exception { assertAcked(prepareCreate("test").addMapping( "type1", @@ -117,7 +122,7 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( - searchSource().query(constantScoreQuery(termQuery("test", "value"))))); + searchSource().query(baseQuery))); SearchResponse sr = response.actionGet(); SearchHits sh = sr.getHits(); assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2))); @@ -125,7 +130,7 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().query( - functionScoreQuery(constantScoreQuery(termQuery("test", "value")), gaussDecayFunction("loc", lonlat, "1000km"))))); + functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km"))))); sr = response.actionGet(); sh = sr.getHits(); assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2))); @@ -136,7 +141,7 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( - searchSource().query(constantScoreQuery(termQuery("test", "value"))))); + searchSource().query(baseQuery))); sr = response.actionGet(); sh = sr.getHits(); assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2))); @@ -144,7 +149,7 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().query( - functionScoreQuery(constantScoreQuery(termQuery("test", "value")), linearDecayFunction("loc", lonlat, "1000km"))))); + functionScoreQuery(baseQuery, linearDecayFunction("loc", lonlat, "1000km"))))); sr = response.actionGet(); sh = sr.getHits(); assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2))); @@ -155,7 +160,7 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( - searchSource().query(constantScoreQuery(termQuery("test", "value"))))); + searchSource().query(baseQuery))); sr = response.actionGet(); sh = sr.getHits(); assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2))); @@ -163,7 +168,7 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().query( - functionScoreQuery(constantScoreQuery(termQuery("test", "value")), exponentialDecayFunction("loc", lonlat, "1000km"))))); + functionScoreQuery(baseQuery, exponentialDecayFunction("loc", lonlat, "1000km"))))); sr = response.actionGet(); sh = sr.getHits(); 
assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2))); @@ -314,30 +319,30 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { .setSource( jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 20).field("lon", 11).endObject() .endObject()).setRefresh(true).get(); - + FunctionScoreQueryBuilder baseQuery = functionScoreQuery(constantScoreQuery(termQuery("test", "value")), ScoreFunctionBuilders.weightFactorFunction(randomIntBetween(1, 10))); GeoPoint point = new GeoPoint(20, 11); ActionFuture<SearchResponse> response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", point, "1000km")).boostMode( - CombineFunction.MULTIPLY)))); + functionScoreQuery(baseQuery, gaussDecayFunction("loc", point, "1000km")).boostMode( + CombineFunction.REPLACE)))); SearchResponse sr = response.actionGet(); SearchHits sh = sr.getHits(); assertThat(sh.getTotalHits(), equalTo((long) (1))); assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5)); + assertThat((double) sh.getAt(0).score(), closeTo(1.0, 1.e-5)); + // this is equivalent to new GeoPoint(20, 11); just flipped, so scores must be the same float[] coords = { 11, 20 }; - response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", coords, "1000km")).boostMode( - CombineFunction.MULTIPLY)))); + functionScoreQuery(baseQuery, gaussDecayFunction("loc", coords, "1000km")).boostMode( + CombineFunction.REPLACE)))); sr = response.actionGet(); sh = sr.getHits(); assertThat(sh.getTotalHits(), equalTo((long) (1))); assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5)); + assertThat((double) sh.getAt(0).score(), closeTo(1.0f, 1.e-5)); } public void testCombineModes() throws Exception { @@ -348,26 +353,25 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { ensureYellow(); client().prepareIndex().setType("type1").setId("1").setIndex("test") - .setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()).setRefresh(true).get(); - - // function score should return 0.5 for this function - + .setSource(jsonBuilder().startObject().field("test", "value value").field("num", 1.0).endObject()).setRefresh(true).get(); + FunctionScoreQueryBuilder baseQuery = functionScoreQuery(constantScoreQuery(termQuery("test", "value")), ScoreFunctionBuilders.weightFactorFunction(2)); + // decay score should return 0.5 for this function and baseQuery should return 2.0f as its score ActionFuture<SearchResponse> response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boost( - 2.0f).boostMode(CombineFunction.MULTIPLY)))); + functionScoreQuery(baseQuery, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)) + .boostMode(CombineFunction.MULTIPLY)))); SearchResponse sr = response.actionGet(); SearchHits sh = sr.getHits(); assertThat(sh.getTotalHits(), equalTo((long) (1))); assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).score(), closeTo(0.153426408, 1.e-5)); + assertThat((double) sh.getAt(0).score(), closeTo(1.0, 1.e-5)); response = client().search(
searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boost( - 2.0f).boostMode(CombineFunction.REPLACE)))); + functionScoreQuery(baseQuery, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)) + .boostMode(CombineFunction.REPLACE)))); sr = response.actionGet(); sh = sr.getHits(); assertThat(sh.getTotalHits(), equalTo((long) (1))); @@ -377,48 +381,48 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boost( - 2.0f).boostMode(CombineFunction.SUM)))); + functionScoreQuery(baseQuery, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)) + .boostMode(CombineFunction.SUM)))); sr = response.actionGet(); sh = sr.getHits(); assertThat(sh.getTotalHits(), equalTo((long) (1))); assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).score(), closeTo(0.30685282 + 0.5, 1.e-5)); + assertThat((double) sh.getAt(0).score(), closeTo(2.0 + 0.5, 1.e-5)); logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation()); response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boost( - 2.0f).boostMode(CombineFunction.AVG)))); + functionScoreQuery(baseQuery, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)) + .boostMode(CombineFunction.AVG)))); sr = response.actionGet(); sh = sr.getHits(); assertThat(sh.getTotalHits(), equalTo((long) (1))); assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).score(), closeTo((0.30685282 + 0.5) / 2, 1.e-5)); + assertThat((double) sh.getAt(0).score(), closeTo((2.0 + 0.5) / 2, 1.e-5)); response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boost( - 2.0f).boostMode(CombineFunction.MIN)))); - sr = response.actionGet(); - sh = sr.getHits(); - assertThat(sh.getTotalHits(), equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5)); - - response = client().search( - searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( - searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boost( - 2.0f).boostMode(CombineFunction.MAX)))); + functionScoreQuery(baseQuery, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)) + .boostMode(CombineFunction.MIN)))); sr = response.actionGet(); sh = sr.getHits(); assertThat(sh.getTotalHits(), equalTo((long) (1))); assertThat(sh.getAt(0).getId(), equalTo("1")); assertThat((double) sh.getAt(0).score(), closeTo(0.5, 1.e-5)); + response = client().search( + searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( + searchSource().query( + functionScoreQuery(baseQuery, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)) + .boostMode(CombineFunction.MAX)))); + sr = response.actionGet(); + sh = sr.getHits(); + assertThat(sh.getTotalHits(), equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).score(), closeTo(2.0, 1.e-5)); + } 
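+ // recap of the combine-mode asserts above, assuming a base score of 2.0 (weightFactorFunction(2)) and a decay score of 0.5: + // MULTIPLY -> 2.0 * 0.5 = 1.0, SUM -> 2.5, AVG -> 1.25, MIN -> 0.5, MAX -> 2.0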
public void testExceptionThrownIfScaleLE0() throws Exception { @@ -509,7 +513,7 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { ActionFuture response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().query( - functionScoreQuery(constantScoreQuery(termQuery("test", "value")), new FunctionScoreQueryBuilder.FilterFunctionBuilder[]{ + functionScoreQuery(baseQuery, new FunctionScoreQueryBuilder.FilterFunctionBuilder[]{ new FunctionScoreQueryBuilder.FilterFunctionBuilder(linearDecayFunction("num1", "2013-05-28", "+3d")), new FunctionScoreQueryBuilder.FilterFunctionBuilder(linearDecayFunction("num2", "0.0", "1")) }).scoreMode(FiltersFunctionScoreQuery.ScoreMode.MULTIPLY)))); @@ -733,7 +737,7 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { ActionFuture response = client().search( searchRequest().source( - searchSource().query(constantScoreQuery(termQuery("test", "value"))))); + searchSource().query(baseQuery))); SearchResponse sr = response.actionGet(); assertSearchHits(sr, "1", "2"); SearchHits sh = sr.getHits(); @@ -745,7 +749,7 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { response = client().search( searchRequest().source( searchSource().query( - functionScoreQuery(constantScoreQuery(termQuery("test", "value")), gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MIN))))); + functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MIN))))); sr = response.actionGet(); assertSearchHits(sr, "1", "2"); sh = sr.getHits(); @@ -755,7 +759,7 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { response = client().search( searchRequest().source( searchSource().query( - functionScoreQuery(constantScoreQuery(termQuery("test", "value")), gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MAX))))); + functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MAX))))); sr = response.actionGet(); assertSearchHits(sr, "1", "2"); sh = sr.getHits(); @@ -784,7 +788,7 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { response = client().search( searchRequest().source( searchSource().query( - functionScoreQuery(constantScoreQuery(termQuery("test", "value")), linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.SUM))))); + functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.SUM))))); sr = response.actionGet(); assertSearchHits(sr, "1", "2"); sh = sr.getHits(); @@ -795,7 +799,7 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { response = client().search( searchRequest().source( searchSource().query( - functionScoreQuery(constantScoreQuery(termQuery("test", "value")), linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.AVG))))); + functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.AVG))))); sr = response.actionGet(); assertSearchHits(sr, "1", "2"); sh = sr.getHits(); diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java index 8a060af2ab0..83e89592b8e 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java @@ -19,10 +19,11 @@ package org.elasticsearch.search.geo; -import com.spatial4j.core.context.SpatialContext; -import 
com.spatial4j.core.distance.DistanceUtils; -import com.spatial4j.core.exception.InvalidShapeException; -import com.spatial4j.core.shape.Shape; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.distance.DistanceUtils; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Shape; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; @@ -560,7 +561,7 @@ public class GeoFilterIT extends ESIntegTestCase { strategy.makeQuery(args); return true; } catch (UnsupportedSpatialOperation e) { - e.printStackTrace(); + ESLoggerFactory.getLogger(GeoFilterIT.class.getName()).info("Unsupported spatial operation {}", e, relation); return false; } } diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java index 7afbeaa9abf..d124fcf6386 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.geo; -import com.spatial4j.core.shape.Rectangle; +import org.locationtech.spatial4j.shape.Rectangle; import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.action.get.GetResponse; @@ -299,7 +299,7 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase { // Create a random geometry collection. GeometryCollectionBuilder gcb = RandomShapeGenerator.createGeometryCollection(getRandom()); - logger.info("Created Random GeometryCollection containing " + gcb.numShapes() + " shapes"); + logger.info("Created Random GeometryCollection containing {} shapes", gcb.numShapes()); client().admin().indices().prepareCreate("test").addMapping("type", "location", "type=geo_shape,tree=quadtree") .execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java index b8f775639f9..bc2c38ef601 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java @@ -275,7 +275,7 @@ public class HighlightBuilderTests extends ESTestCase { Index index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_"); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings); // shard context will only need indicesQueriesRegistry for building Query objects nested in highlighter - QueryShardContext mockShardContext = new QueryShardContext(idxSettings, null, null, null, null, null, indicesQueriesRegistry) { + QueryShardContext mockShardContext = new QueryShardContext(idxSettings, null, null, null, null, null, indicesQueriesRegistry, null) { @Override public MappedFieldType fieldMapper(String name) { TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name); @@ -400,7 +400,6 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); assertEquals("expected HighlightBuilder with field", new HighlightBuilder().field(new Field("foo")), highlightBuilder); - System.out.println(Math.log(1/(double)(1+1)) + 1.0); } /** diff --git 
a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java index 9f898a47c06..dbe2714d05d 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java @@ -268,78 +268,6 @@ public class HighlighterSearchIT extends ESIntegTestCase { equalTo("Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com")); } - public void testNgramHighlightingPreLucene42() throws IOException { - assertAcked(prepareCreate("test") - .addMapping("test", - "name", "type=text,analyzer=name_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets", - "name2", "type=text,analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets") - .setSettings(settingsBuilder() - .put(indexSettings()) - .put("analysis.filter.my_ngram.max_gram", 20) - .put("analysis.filter.my_ngram.version", "4.1") - .put("analysis.filter.my_ngram.min_gram", 1) - .put("analysis.filter.my_ngram.type", "ngram") - .put("analysis.tokenizer.my_ngramt.max_gram", 20) - .put("analysis.tokenizer.my_ngramt.version", "4.1") - .put("analysis.tokenizer.my_ngramt.min_gram", 1) - .put("analysis.tokenizer.my_ngramt.type", "ngram") - .put("analysis.analyzer.name_index_analyzer.tokenizer", "my_ngramt") - .put("analysis.analyzer.name2_index_analyzer.tokenizer", "whitespace") - .putArray("analysis.analyzer.name2_index_analyzer.filter", "lowercase", "my_ngram") - .put("analysis.analyzer.name_search_analyzer.tokenizer", "whitespace") - .put("analysis.analyzer.name_search_analyzer.filter", "lowercase"))); - ensureYellow(); - client().prepareIndex("test", "test", "1") - .setSource("name", "logicacmg ehemals avinci - the know how company", - "name2", "logicacmg ehemals avinci - the know how company").get(); - client().prepareIndex("test", "test", "2") - .setSource("name", "avinci, unilog avinci, logicacmg, logica", - "name2", "avinci, unilog avinci, logicacmg, logica").get(); - refresh(); - - SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica m"))) - .highlighter(new HighlightBuilder().field("name")).get(); - assertHighlight(search, 0, "name", 0, anyOf(equalTo("logicacmg ehemals avinci - the know how company"), - equalTo("avinci, unilog avinci, logicacmg, logica"))); - assertHighlight(search, 1, "name", 0, anyOf(equalTo("logicacmg ehemals avinci - the know how company"), - equalTo("avinci, unilog avinci, logicacmg, logica"))); - - search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica ma"))) - .highlighter(new HighlightBuilder().field("name")).get(); - assertHighlight(search, 0, "name", 0, anyOf(equalTo("logicacmg ehemals avinci - the know how company"), - equalTo("avinci, unilog avinci, logicacmg, logica"))); - assertHighlight(search, 1, "name", 0, anyOf(equalTo("logicacmg ehemals avinci - the know how company"), - equalTo("avinci, unilog avinci, logicacmg, logica"))); - - search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica"))) - .highlighter(new HighlightBuilder().field("name")).get(); - assertHighlight(search, 0, "name", 0, anyOf(equalTo("logicacmg ehemals avinci - the know how company"), - equalTo("avinci, unilog avinci, logicacmg, logica"))); - assertHighlight(search, 
0, "name", 0, anyOf(equalTo("logicacmg ehemals avinci - the know how company"), - equalTo("avinci, unilog avinci, logicacmg, logica"))); - - search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica m"))) - .highlighter(new HighlightBuilder().field("name2")).get(); - assertHighlight(search, 0, "name2", 0, anyOf(equalTo("logicacmg ehemals avinci - the know how company"), - equalTo("avinci, unilog avinci, logicacmg, logica"))); - assertHighlight(search, 1, "name2", 0, anyOf(equalTo("logicacmg ehemals avinci - the know how company"), - equalTo("avinci, unilog avinci, logicacmg, logica"))); - - search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica ma"))) - .highlighter(new HighlightBuilder().field("name2")).get(); - assertHighlight(search, 0, "name2", 0, anyOf(equalTo("logicacmg ehemals avinci - the know how company"), - equalTo("avinci, unilog avinci, logicacmg, logica"))); - assertHighlight(search, 1, "name2", 0, anyOf(equalTo("logicacmg ehemals avinci - the know how company"), - equalTo("avinci, unilog avinci, logicacmg, logica"))); - - search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica"))) - .highlighter(new HighlightBuilder().field("name2")).get(); - assertHighlight(search, 0, "name2", 0, anyOf(equalTo("logicacmg ehemals avinci - the know how company"), - equalTo("avinci, unilog avinci, logicacmg, logica"))); - assertHighlight(search, 1, "name2", 0, anyOf(equalTo("logicacmg ehemals avinci - the know how company"), - equalTo("avinci, unilog avinci, logicacmg, logica"))); - } - public void testNgramHighlighting() throws IOException { assertAcked(prepareCreate("test") .addMapping("test", @@ -2116,7 +2044,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { .query(multiMatchQueryBuilder) .highlighter(highlight().highlightQuery(randomBoolean() ? 
multiMatchQueryBuilder : null).highlighterType(highlighterType) .field(new Field("field1").requireFieldMatch(true).preTags("").postTags(""))); - logger.info("Running multi-match type: [" + matchQueryType + "] highlight with type: [" + highlighterType + "]"); + logger.info("Running multi-match type: [{}] highlight with type: [{}]", matchQueryType, highlighterType); SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); assertHitCount(searchResponse, 1L); assertHighlight(searchResponse, 0, "field1", 0, anyOf(equalTo("The quick brown fox jumps over"), diff --git a/core/src/test/java/org/elasticsearch/search/innerhits/InnerHitsIT.java b/core/src/test/java/org/elasticsearch/search/innerhits/InnerHitsIT.java index fad1cc3a0ef..084e07e0389 100644 --- a/core/src/test/java/org/elasticsearch/search/innerhits/InnerHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/innerhits/InnerHitsIT.java @@ -20,15 +20,12 @@ package org.elasticsearch.search.innerhits; import org.apache.lucene.util.ArrayUtil; -import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.HasChildQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.support.QueryInnerHits; import org.elasticsearch.plugins.Plugin; @@ -75,7 +72,7 @@ import static org.hamcrest.Matchers.nullValue; public class InnerHitsIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return pluginList(MockScriptEngine.TestPlugin.class, InternalSettingsPlugin.class); + return pluginList(MockScriptEngine.TestPlugin.class); } public void testSimpleNested() throws Exception { @@ -753,160 +750,6 @@ public class InnerHitsIT extends ESIntegTestCase { assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue()); } - public void testNestedInnerHitsWithStoredFieldsAndNoSourceBackcompat() throws Exception { - assertAcked(prepareCreate("articles") - .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) - .addMapping("article", jsonBuilder().startObject() - .startObject("_source").field("enabled", false).endObject() - .startObject("properties") - .startObject("comments") - .field("type", "nested") - .startObject("properties") - .startObject("message").field("type", "text").field("store", true).endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ) - ); - - List requests = new ArrayList<>(); - requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject() - .field("title", "quick brown fox") - .startObject("comments").field("message", "fox eat quick").endObject() - .endObject())); - indexRandom(true, requests); - - SearchResponse response = client().prepareSearch("articles") - .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(new QueryInnerHits(null, new InnerHitsBuilder.InnerHit().field("comments.message")))) - .get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).id(), equalTo("1")); - 
assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits(), equalTo(1L)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).id(), equalTo("1")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue()); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).fields().get("comments.message").getValue(), equalTo("fox eat quick")); - } - - public void testNestedInnerHitsWithHighlightOnStoredFieldBackcompat() throws Exception { - assertAcked(prepareCreate("articles") - .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) - .addMapping("article", jsonBuilder().startObject() - .startObject("_source").field("enabled", false).endObject() - .startObject("properties") - .startObject("comments") - .field("type", "nested") - .startObject("properties") - .startObject("message").field("type", "text").field("store", true).endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ) - ); - - List requests = new ArrayList<>(); - requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject() - .field("title", "quick brown fox") - .startObject("comments").field("message", "fox eat quick").endObject() - .endObject())); - indexRandom(true, requests); - InnerHitsBuilder.InnerHit builder = new InnerHitsBuilder.InnerHit(); - builder.highlighter(new HighlightBuilder().field("comments.message")); - SearchResponse response = client().prepareSearch("articles") - .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(new QueryInnerHits(null, builder))) - .get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).id(), equalTo("1")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits(), equalTo(1L)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).id(), equalTo("1")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue()); - assertThat(String.valueOf(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).highlightFields().get("comments.message").getFragments()[0]), equalTo("fox eat quick")); - } - - public void testNestedInnerHitsWithExcludeSourceBackcompat() throws Exception { - assertAcked(prepareCreate("articles").setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) - .addMapping("article", jsonBuilder().startObject() - .startObject("_source").field("excludes", new String[]{"comments"}).endObject() - .startObject("properties") - .startObject("comments") - .field("type", "nested") - .startObject("properties") - .startObject("message").field("type", "text").field("store", true).endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ) - ); - - List requests = new ArrayList<>(); - 
requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject() - .field("title", "quick brown fox") - .startObject("comments").field("message", "fox eat quick").endObject() - .endObject())); - indexRandom(true, requests); - InnerHitsBuilder.InnerHit builder = new InnerHitsBuilder.InnerHit(); - builder.field("comments.message"); - builder.setFetchSource(true); - SearchResponse response = client().prepareSearch("articles") - .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(new QueryInnerHits(null, builder))) - .get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).id(), equalTo("1")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits(), equalTo(1L)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).id(), equalTo("1")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue()); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).fields().get("comments.message").getValue(), equalTo("fox eat quick")); - } - - public void testNestedInnerHitsHiglightWithExcludeSourceBackcompat() throws Exception { - assertAcked(prepareCreate("articles").setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) - .addMapping("article", jsonBuilder().startObject() - .startObject("_source").field("excludes", new String[]{"comments"}).endObject() - .startObject("properties") - .startObject("comments") - .field("type", "nested") - .startObject("properties") - .startObject("message").field("type", "text").field("store", true).endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ) - ); - - List requests = new ArrayList<>(); - requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject() - .field("title", "quick brown fox") - .startObject("comments").field("message", "fox eat quick").endObject() - .endObject())); - indexRandom(true, requests); - InnerHitsBuilder.InnerHit builder = new InnerHitsBuilder.InnerHit(); - builder.highlighter(new HighlightBuilder().field("comments.message")); - SearchResponse response = client().prepareSearch("articles") - .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(new QueryInnerHits(null, builder))) - .get(); - assertNoFailures(response); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).id(), equalTo("1")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits(), equalTo(1L)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).id(), equalTo("1")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue()); - 
assertThat(String.valueOf(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).highlightFields().get("comments.message").getFragments()[0]), equalTo("fox eat quick")); - } - public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { assertAcked(prepareCreate("articles") .addMapping("article", jsonBuilder().startObject() diff --git a/core/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesIT.java b/core/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesIT.java index d93d5117274..00fa879f08f 100644 --- a/core/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesIT.java +++ b/core/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesIT.java @@ -53,15 +53,17 @@ public class MatchedQueriesIT extends ESIntegTestCase { refresh(); SearchResponse searchResponse = client().prepareSearch() - .setQuery(boolQuery().must(matchAllQuery()).filter(boolQuery().should(rangeQuery("number").lte(2).queryName("test1")).should(rangeQuery("number").gt(2).queryName("test2")))).get(); +.setQuery(boolQuery().must(matchAllQuery()).filter(boolQuery() + .should(rangeQuery("number").lt(2).queryName("test1")).should(rangeQuery("number").gte(2).queryName("test2")))) + .get(); assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { - if (hit.id().equals("1") || hit.id().equals("2")) { - assertThat(hit.matchedQueries().length, equalTo(1)); - assertThat(hit.matchedQueries(), hasItemInArray("test1")); - } else if (hit.id().equals("3")) { + if (hit.id().equals("3") || hit.id().equals("2")) { assertThat(hit.matchedQueries().length, equalTo(1)); assertThat(hit.matchedQueries(), hasItemInArray("test2")); + } else if (hit.id().equals("1")) { + assertThat(hit.matchedQueries().length, equalTo(1)); + assertThat(hit.matchedQueries(), hasItemInArray("test1")); } else { fail("Unexpected document returned with id " + hit.id()); } diff --git a/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java index 38d8287239e..31accf42962 100644 --- a/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java +++ b/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java @@ -383,7 +383,7 @@ public class MoreLikeThisIT extends ESIntegTestCase { int maxIters = randomIntBetween(10, 20); for (int i = 0; i < maxIters; i++) { int max_query_terms = randomIntBetween(1, values.length); - logger.info("Running More Like This with max_query_terms = %s", max_query_terms); + logger.info("Running More Like This with max_query_terms = {}", max_query_terms); MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery(new String[] {"text"}, null, new Item[] {new Item(null, null, "0")}) .minTermFreq(1).minDocFreq(1) .maxQueryTerms(max_query_terms).minimumShouldMatch("0%"); @@ -419,7 +419,7 @@ public class MoreLikeThisIT extends ESIntegTestCase { .minTermFreq(1) .minDocFreq(1) .minimumShouldMatch(minimumShouldMatch); - logger.info("Testing with minimum_should_match = " + minimumShouldMatch); + logger.info("Testing with minimum_should_match = {}", minimumShouldMatch); SearchResponse response = client().prepareSearch("test").setTypes("type1") .setQuery(mltQuery).get(); assertSearchResponse(response); diff --git a/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java b/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java index 2f932604145..e38ac0ca76e 100644 --- 
a/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java +++ b/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.search.sort.SortMode; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; @@ -748,7 +749,7 @@ public class SimpleNestedIT extends ESIntegTestCase { .addSort( SortBuilders.fieldSort("parent.child.child_values") .setNestedPath("parent.child") - .sortMode("sum") + .sortMode(SortMode.SUM) .order(SortOrder.ASC) ) .execute().actionGet(); @@ -768,7 +769,7 @@ public class SimpleNestedIT extends ESIntegTestCase { .addSort( SortBuilders.fieldSort("parent.child.child_values") .setNestedPath("parent.child") - .sortMode("sum") + .sortMode(SortMode.SUM) .order(SortOrder.DESC) ) .execute().actionGet(); @@ -789,7 +790,7 @@ public class SimpleNestedIT extends ESIntegTestCase { SortBuilders.fieldSort("parent.child.child_values") .setNestedPath("parent.child") .setNestedFilter(QueryBuilders.termQuery("parent.child.filter", true)) - .sortMode("sum") + .sortMode(SortMode.SUM) .order(SortOrder.ASC) ) .execute().actionGet(); @@ -809,7 +810,7 @@ public class SimpleNestedIT extends ESIntegTestCase { .addSort( SortBuilders.fieldSort("parent.child.child_values") .setNestedPath("parent.child") - .sortMode("avg") + .sortMode(SortMode.AVG) .order(SortOrder.ASC) ) .execute().actionGet(); @@ -828,7 +829,7 @@ public class SimpleNestedIT extends ESIntegTestCase { .addSort( SortBuilders.fieldSort("parent.child.child_values") .setNestedPath("parent.child") - .sortMode("avg") + .sortMode(SortMode.AVG) .order(SortOrder.DESC) ) .execute().actionGet(); @@ -849,7 +850,7 @@ public class SimpleNestedIT extends ESIntegTestCase { SortBuilders.fieldSort("parent.child.child_values") .setNestedPath("parent.child") .setNestedFilter(QueryBuilders.termQuery("parent.child.filter", true)) - .sortMode("avg") + .sortMode(SortMode.AVG) .order(SortOrder.ASC) ) .execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/percolator/PercolatorQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/percolator/PercolatorQuerySearchIT.java new file mode 100644 index 00000000000..55f2ab80121 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/percolator/PercolatorQuerySearchIT.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.search.percolator; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; +import org.elasticsearch.search.highlight.HighlightBuilder; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.index.query.QueryBuilders.percolatorQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.hamcrest.Matchers.equalTo; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; + +public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { + + public void testPercolatorQuery() throws Exception { + createIndex("test", client().admin().indices().prepareCreate("test") + .addMapping("type", "field1", "type=keyword", "field2", "type=keyword") + ); + + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") + .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) + .get(); + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2") + .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "value")).endObject()) + .get(); + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3") + .setSource(jsonBuilder().startObject().field("query", boolQuery() + .must(matchQuery("field1", "value")) + .must(matchQuery("field2", "value")) + ).endObject()).get(); + client().admin().indices().prepareRefresh().get(); + + BytesReference source = jsonBuilder().startObject().endObject().bytes(); + logger.info("percolating empty doc"); + SearchResponse response = client().prepareSearch() + .setQuery(percolatorQuery("type", source)) + .get(); + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + + source = jsonBuilder().startObject().field("field1", "value").endObject().bytes(); + logger.info("percolating doc with 1 field"); + response = client().prepareSearch() + .setQuery(percolatorQuery("type", source)) + .addSort("_uid", SortOrder.ASC) + .get(); + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + + source = jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject().bytes(); + logger.info("percolating doc with 2 fields"); + response = client().prepareSearch() + .setQuery(percolatorQuery("type", source)) + .addSort("_uid", SortOrder.ASC) + .get(); + assertHitCount(response, 3); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + } + + public void testPercolatorQueryWithHighlighting() throws Exception { + createIndex("test", client().admin().indices().prepareCreate("test") + .addMapping("type", "field1", "type=text") + ); + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1") + .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "brown 
fox")).endObject()) + .execute().actionGet(); + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2") + .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "lazy dog")).endObject()) + .execute().actionGet(); + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3") + .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "jumps")).endObject()) + .execute().actionGet(); + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4") + .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "dog")).endObject()) + .execute().actionGet(); + client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "5") + .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "fox")).endObject()) + .execute().actionGet(); + client().admin().indices().prepareRefresh().get(); + + BytesReference document = jsonBuilder().startObject() + .field("field1", "The quick brown fox jumps over the lazy dog") + .endObject().bytes(); + SearchResponse searchResponse = client().prepareSearch() + .setQuery(percolatorQuery("type", document)) + .highlighter(new HighlightBuilder().field("field1")) + .addSort("_uid", SortOrder.ASC) + .get(); + assertHitCount(searchResponse, 5); + + assertThat(searchResponse.getHits().getAt(0).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog")); + assertThat(searchResponse.getHits().getAt(1).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog")); + assertThat(searchResponse.getHits().getAt(2).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog")); + assertThat(searchResponse.getHits().getAt(3).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog")); + assertThat(searchResponse.getHits().getAt(4).getHighlightFields().get("field1").fragments()[0].string(), + equalTo("The quick brown fox jumps over the lazy dog"));; + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java b/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java index f09b18bdb8a..d9878774576 100644 --- a/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java +++ b/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java @@ -72,7 +72,7 @@ public class QueryProfilerIT extends ESIntegTestCase { int iters = between(20, 100); for (int i = 0; i < iters; i++) { QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3); - logger.info(q.toString()); + logger.info("Query: {}", q); SearchResponse resp = client().prepareSearch() .setQuery(q) @@ -126,13 +126,11 @@ public class QueryProfilerIT extends ESIntegTestCase { int iters = between(1, 10); for (int i = 0; i < iters; i++) { QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3); - logger.info(q.toString()); - + logger.info("Query: {}", q); SearchRequestBuilder vanilla = client().prepareSearch("test") .setQuery(q) .setProfile(false) - .addSort("_score", SortOrder.DESC) .addSort("_uid", SortOrder.ASC) .setPreference("_primary") .setSearchType(SearchType.QUERY_THEN_FETCH); @@ -140,7 +138,6 @@ public class QueryProfilerIT extends ESIntegTestCase { SearchRequestBuilder profile = client().prepareSearch("test") .setQuery(q) .setProfile(true) - .addSort("_score", SortOrder.DESC) .addSort("_uid", SortOrder.ASC) 
.setPreference("_primary") .setSearchType(SearchType.QUERY_THEN_FETCH); @@ -309,7 +306,7 @@ public class QueryProfilerIT extends ESIntegTestCase { refresh(); QueryBuilder q = QueryBuilders.boolQuery(); - logger.info(q.toString()); + logger.info("Query: {}", q); SearchResponse resp = client().prepareSearch() .setQuery(q) @@ -360,8 +357,7 @@ public class QueryProfilerIT extends ESIntegTestCase { QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one")))); - - logger.info(q.toString()); + logger.info("Query: {}", q); SearchResponse resp = client().prepareSearch() .setQuery(q) @@ -408,7 +404,7 @@ public class QueryProfilerIT extends ESIntegTestCase { QueryBuilder q = QueryBuilders.boostingQuery(QueryBuilders.matchQuery("field1", "one"), QueryBuilders.matchQuery("field1", "two")) .boost(randomFloat()) .negativeBoost(randomFloat()); - logger.info(q.toString()); + logger.info("Query: {}", q); SearchResponse resp = client().prepareSearch() .setQuery(q) @@ -455,7 +451,7 @@ public class QueryProfilerIT extends ESIntegTestCase { QueryBuilder q = QueryBuilders.disMaxQuery() .boost(0.33703882f) .add(QueryBuilders.rangeQuery("field2").from(null).to(73).includeLower(true).includeUpper(true)); - logger.info(q.toString()); + logger.info("Query: {}", q); SearchResponse resp = client().prepareSearch() .setQuery(q) @@ -501,7 +497,7 @@ public class QueryProfilerIT extends ESIntegTestCase { QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5); - logger.info(q.toString()); + logger.info("Query: {}", q.toString()); SearchResponse resp = client().prepareSearch() .setQuery(q) @@ -547,7 +543,7 @@ public class QueryProfilerIT extends ESIntegTestCase { QueryBuilder q = QueryBuilders.matchPhraseQuery("field1", "one two"); - logger.info(q.toString()); + logger.info("Query: {}", q); SearchResponse resp = client().prepareSearch() .setQuery(q) @@ -559,7 +555,7 @@ public class QueryProfilerIT extends ESIntegTestCase { if (resp.getShardFailures().length > 0) { for (ShardSearchFailure f : resp.getShardFailures()) { - logger.error(f.toString()); + logger.error("Shard search failure: {}", f); } fail(); } @@ -603,7 +599,7 @@ public class QueryProfilerIT extends ESIntegTestCase { refresh(); QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5); - logger.info(q.toString()); + logger.info("Query: {}", q); SearchResponse resp = client().prepareSearch().setQuery(q).setProfile(false).execute().actionGet(); assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0)); diff --git a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index f65b17288ae..be190b547ea 100644 --- a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -156,12 +156,12 @@ public class MultiMatchQueryIT extends ESIntegTestCase { .endObject() .startObject("first_name") .field("type", "text") - .field("omit_norms", "true") + .field("norms", false) .field("copy_to", "first_name_phrase") .endObject() .startObject("last_name") .field("type", "text") - .field("omit_norms", "true") + .field("norms", false) .field("copy_to", "last_name_phrase") .endObject() .endObject() @@ -180,7 +180,7 @@ public class MultiMatchQueryIT extends ESIntegTestCase { // the doc id is the tie-breaker } assertThat(topNIds, empty()); - 
assertThat(searchResponse.getHits().hits()[0].getScore(), equalTo(searchResponse.getHits().hits()[1].getScore())); + assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThan(searchResponse.getHits().hits()[1].getScore())); searchResponse = client().prepareSearch("test") .setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category") @@ -567,7 +567,7 @@ public class MultiMatchQueryIT extends ESIntegTestCase { // test if boosts work searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").field("last_name", 2) + .setQuery(randomizeType(multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").field("last_name", 10) .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) .operator(Operator.AND))).get(); assertFirstHit(searchResponse, hasId("ultimate1")); // has ultimate in the last_name and that is boosted diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 079363719f1..891911b6202 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -117,7 +117,7 @@ public class SearchQueryIT extends ESIntegTestCase { public void testOmitNormsOnAll() throws ExecutionException, InterruptedException, IOException { assertAcked(prepareCreate("test") .addMapping("type1", jsonBuilder().startObject().startObject("type1") - .startObject("_all").field("omit_norms", true).endObject() + .startObject("_all").field("norms", false).endObject() .endObject().endObject()) .setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)); // only one shard otherwise IDF might be different for comparing scores @@ -375,9 +375,9 @@ public class SearchQueryIT extends ESIntegTestCase { // try the same with multi match query searchResponse = client().prepareSearch().setQuery(multiMatchQuery("the quick brown", "field1", "field2").cutoffFrequency(3).operator(Operator.AND)).get(); assertHitCount(searchResponse, 3L); - assertFirstHit(searchResponse, hasId("3")); // better score due to different query stats - assertSecondHit(searchResponse, hasId("1")); - assertThirdHit(searchResponse, hasId("2")); + assertFirstHit(searchResponse, hasId("1")); + assertSecondHit(searchResponse, hasId("2")); + assertThirdHit(searchResponse, hasId("3")); } public void testCommonTermsQueryStackedTokens() throws Exception { @@ -467,9 +467,9 @@ public class SearchQueryIT extends ESIntegTestCase { // try the same with multi match query searchResponse = client().prepareSearch().setQuery(multiMatchQuery("the fast brown", "field1", "field2").cutoffFrequency(3).operator(Operator.AND)).get(); assertHitCount(searchResponse, 3L); - assertFirstHit(searchResponse, hasId("3")); // better score due to different query stats - assertSecondHit(searchResponse, hasId("1")); - assertThirdHit(searchResponse, hasId("2")); + assertFirstHit(searchResponse, hasId("1")); + assertSecondHit(searchResponse, hasId("2")); + assertThirdHit(searchResponse, hasId("3")); } public void testQueryStringAnalyzedWildcard() throws Exception { @@ -1914,6 +1914,7 @@ public class SearchQueryIT extends ESIntegTestCase { assertHitCount(client().prepareSearch("test").setSize(0).setQuery(rangeQuery("field").lte(-999999999999L)).get(), 3); } + @AwaitsFix(bugUrl = "NOCOMMIT") public void testRangeQueryWithTimeZone() 
throws Exception { assertAcked(prepareCreate("test") .addMapping("type1", "date", "type=date", "num", "type=integer")); diff --git a/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java index a39c618fe9d..e0673a64ee1 100644 --- a/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java @@ -39,7 +39,6 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.core.TextFieldMapper; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -110,14 +109,17 @@ public class QueryRescoreBuilderTests extends ESTestCase { assertTrue("rescore builder is not equal to self", secondBuilder.equals(secondBuilder)); assertTrue("rescore builder is not equal to its copy", firstBuilder.equals(secondBuilder)); assertTrue("equals is not symmetric", secondBuilder.equals(firstBuilder)); - assertThat("rescore builder copy's hashcode is different from original hashcode", secondBuilder.hashCode(), equalTo(firstBuilder.hashCode())); + assertThat("rescore builder copy's hashcode is different from original hashcode", secondBuilder.hashCode(), + equalTo(firstBuilder.hashCode())); RescoreBuilder thirdBuilder = serializedCopy(secondBuilder); assertTrue("rescore builder is not equal to self", thirdBuilder.equals(thirdBuilder)); assertTrue("rescore builder is not equal to its copy", secondBuilder.equals(thirdBuilder)); - assertThat("rescore builder copy's hashcode is different from original hashcode", secondBuilder.hashCode(), equalTo(thirdBuilder.hashCode())); + assertThat("rescore builder copy's hashcode is different from original hashcode", secondBuilder.hashCode(), + equalTo(thirdBuilder.hashCode())); assertTrue("equals is not transitive", firstBuilder.equals(thirdBuilder)); - assertThat("rescore builder copy's hashcode is different from original hashcode", firstBuilder.hashCode(), equalTo(thirdBuilder.hashCode())); + assertThat("rescore builder copy's hashcode is different from original hashcode", firstBuilder.hashCode(), + equalTo(thirdBuilder.hashCode())); assertTrue("equals is not symmetric", thirdBuilder.equals(secondBuilder)); assertTrue("equals is not symmetric", thirdBuilder.equals(firstBuilder)); } @@ -160,7 +162,8 @@ public class QueryRescoreBuilderTests extends ESTestCase { .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(randomAsciiOfLengthBetween(1, 10), indexSettings); // shard context will only need indicesQueriesRegistry for building Query objects nested in query rescorer - QueryShardContext mockShardContext = new QueryShardContext(idxSettings, null, null, null, null, null, indicesQueriesRegistry) { + QueryShardContext mockShardContext = new QueryShardContext(idxSettings, null, null, null, null, null, indicesQueriesRegistry, + null) { @Override public MappedFieldType fieldMapper(String name) { TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name); @@ -173,7 +176,8 @@ public class QueryRescoreBuilderTests extends ESTestCase { QueryRescoreContext rescoreContext = 
rescoreBuilder.build(mockShardContext); XContentParser parser = createParser(rescoreBuilder); - QueryRescoreContext parsedRescoreContext = (QueryRescoreContext) new RescoreParseElement().parseSingleRescoreContext(parser, mockShardContext); + QueryRescoreContext parsedRescoreContext = (QueryRescoreContext) new RescoreParseElement().parseSingleRescoreContext(parser, + mockShardContext); assertNotSame(rescoreContext, parsedRescoreContext); assertEquals(rescoreContext.window(), parsedRescoreContext.window()); assertEquals(rescoreContext.query(), parsedRescoreContext.query()); @@ -316,7 +320,8 @@ public class QueryRescoreBuilderTests extends ESTestCase { * create random shape that is put under test */ public static org.elasticsearch.search.rescore.QueryRescorerBuilder randomRescoreBuilder() { - QueryBuilder queryBuilder = new MatchAllQueryBuilder().boost(randomFloat()).queryName(randomAsciiOfLength(20)); + QueryBuilder queryBuilder = new MatchAllQueryBuilder().boost(randomFloat()) + .queryName(randomAsciiOfLength(20)); org.elasticsearch.search.rescore.QueryRescorerBuilder rescorer = new org.elasticsearch.search.rescore.QueryRescorerBuilder(queryBuilder); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java b/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java index 292f9a495dc..6c0b9963940 100644 --- a/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java +++ b/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollIT.java @@ -282,8 +282,8 @@ public class DuelScrollIT extends ESIntegTestCase { } assertEquals(control.getHits().getTotalHits(), scrollDocs); } catch (AssertionError e) { - logger.info("Control:\n" + control); - logger.info("Scroll size=" + size + ", from=" + scrollDocs + ":\n" + scroll); + logger.info("Control:\n{}", control); + logger.info("Scroll size={}, from={}:\n{}", size, scrollDocs, scroll); throw e; } finally { clearScroll(scroll.getScrollId()); diff --git a/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java index 6c10a1c8aef..136c1fba2e0 100644 --- a/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.internal.DefaultSearchContext; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -270,7 +269,7 @@ public class SimpleSearchIT extends ESIntegTestCase { searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)) .setTerminateAfter(i).execute().actionGet(); - assertHitCount(searchResponse, (long)i); + assertHitCount(searchResponse, i); assertTrue(searchResponse.isTerminatedEarly()); } diff --git a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java index dfea1a9316b..f7f9edbc0b2 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.sort; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import 
org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -43,7 +42,7 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -public abstract class AbstractSortTestCase & ToXContent & SortElementParserTemp> extends ESTestCase { +public abstract class AbstractSortTestCase> extends ESTestCase { protected static NamedWriteableRegistry namedWriteableRegistry; @@ -53,7 +52,10 @@ public abstract class AbstractSortTestCase & ToXCont @BeforeClass public static void init() { namedWriteableRegistry = new NamedWriteableRegistry(); - namedWriteableRegistry.registerPrototype(GeoDistanceSortBuilder.class, GeoDistanceSortBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SortBuilder.class, GeoDistanceSortBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SortBuilder.class, ScoreSortBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SortBuilder.class, ScriptSortBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SortBuilder.class, FieldSortBuilder.PROTOTYPE); indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).buildQueryParserRegistry(); } @@ -85,9 +87,9 @@ public abstract class AbstractSortTestCase & ToXCont XContentParser itemParser = XContentHelper.createParser(builder.bytes()); itemParser.nextToken(); - + /* - * filter out name of sort, or field name to sort on for element fieldSort + * filter out name of sort, or field name to sort on for element fieldSort */ itemParser.nextToken(); String elementName = itemParser.currentName(); @@ -95,7 +97,7 @@ public abstract class AbstractSortTestCase & ToXCont QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); context.reset(itemParser); - NamedWriteable parsedItem = testItem.fromXContent(context, elementName); + SortBuilder parsedItem = testItem.fromXContent(context, elementName); assertNotSame(testItem, parsedItem); assertEquals(testItem, parsedItem); assertEquals(testItem.hashCode(), parsedItem.hashCode()); @@ -146,17 +148,16 @@ public abstract class AbstractSortTestCase & ToXCont } } + @SuppressWarnings("unchecked") protected T copyItem(T original) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { original.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) { - @SuppressWarnings("unchecked") - T prototype = (T) namedWriteableRegistry.getPrototype(getPrototype(), original.getWriteableName()); - T copy = (T) prototype.readFrom(in); + T prototype = (T) namedWriteableRegistry.getPrototype(SortBuilder.class, + original.getWriteableName()); + T copy = prototype.readFrom(in); return copy; } } } - - protected abstract Class getPrototype(); } diff --git a/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java new file mode 100644 index 00000000000..025f7930165 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java @@ -0,0 +1,85 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.sort; + +import java.io.IOException; + +public class FieldSortBuilderTests extends AbstractSortTestCase { + + @Override + protected FieldSortBuilder createTestItem() { + String fieldName = randomAsciiOfLengthBetween(1, 10); + FieldSortBuilder builder = new FieldSortBuilder(fieldName); + if (randomBoolean()) { + builder.order(RandomSortDataGenerator.order(builder.order())); + } + + if (randomBoolean()) { + builder.missing(RandomSortDataGenerator.missing(builder.missing())); + } + + if (randomBoolean()) { + builder.unmappedType(RandomSortDataGenerator.randomAscii(builder.unmappedType())); + } + + if (randomBoolean()) { + builder.sortMode(RandomSortDataGenerator.mode(builder.sortMode())); + } + + if (randomBoolean()) { + builder.setNestedFilter(RandomSortDataGenerator.nestedFilter(builder.getNestedFilter())); + } + + if (randomBoolean()) { + builder.setNestedPath(RandomSortDataGenerator.randomAscii(builder.getNestedPath())); + } + + return builder; + } + + @Override + protected FieldSortBuilder mutate(FieldSortBuilder original) throws IOException { + FieldSortBuilder mutated = new FieldSortBuilder(original); + int parameter = randomIntBetween(0, 5); + switch (parameter) { + case 0: + mutated.setNestedPath(RandomSortDataGenerator.randomAscii(mutated.getNestedPath())); + break; + case 1: + mutated.setNestedFilter(RandomSortDataGenerator.nestedFilter(mutated.getNestedFilter())); + break; + case 2: + mutated.sortMode(RandomSortDataGenerator.mode(mutated.sortMode())); + break; + case 3: + mutated.unmappedType(RandomSortDataGenerator.randomAscii(mutated.unmappedType())); + break; + case 4: + mutated.missing(RandomSortDataGenerator.missing(mutated.missing())); + break; + case 5: + mutated.order(RandomSortDataGenerator.order(mutated.order())); + break; + default: + throw new IllegalStateException("Unsupported mutation."); + } + return mutated; + } +} diff --git a/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java b/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java index 894f0fbe6b9..ef313620b59 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/core/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -19,44 +19,6 @@ package org.elasticsearch.search.sort; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; -import static org.hamcrest.Matchers.closeTo; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; -import java.util.Random; -import java.util.Set; -import java.util.TreeMap; -import java.util.Map.Entry; -import java.util.concurrent.ExecutionException; - import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; @@ -80,6 +42,43 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.hamcrest.Matchers; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map.Entry; +import java.util.Random; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + public class FieldSortIT extends ESIntegTestCase { @Override 
protected Collection<Class<? extends Plugin>> nodePlugins() { @@ -985,7 +984,7 @@ public class FieldSortIT extends ESIntegTestCase { searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) .setSize(10) - .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode("sum")) + .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode(SortMode.SUM)) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(3L)); diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java index d40cbf93002..ed733fd4cd7 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java +++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java @@ -270,7 +270,7 @@ public class GeoDistanceIT extends ESIntegTestCase { // Order: Asc, Mode: max searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max")) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC).sortMode(SortMode.MAX)) .execute().actionGet(); assertHitCount(searchResponse, 5); @@ -296,7 +296,7 @@ public class GeoDistanceIT extends ESIntegTestCase { // Order: Desc, Mode: min searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min")) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC).sortMode(SortMode.MIN)) .execute().actionGet(); assertHitCount(searchResponse, 5); @@ -308,7 +308,7 @@ public class GeoDistanceIT extends ESIntegTestCase { assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(0d, 10d)); searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC)) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.ASC)) .execute().actionGet(); assertHitCount(searchResponse, 5); @@ -320,7 +320,7 @@ public class GeoDistanceIT extends ESIntegTestCase { assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(5301d, 10d)); searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode("avg").order(SortOrder.DESC)) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.DESC)) .execute().actionGet(); assertHitCount(searchResponse, 5); @@ -333,7 +333,7 @@ public class GeoDistanceIT extends ESIntegTestCase { try { client().prepareSearch("test").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode("sum")); + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode(SortMode.SUM)); fail("sum should not be supported for sorting by geo distance"); } catch (IllegalArgumentException e) { // expected @@ -455,7 +455,7 @@ public class GeoDistanceIT extends ESIntegTestCase { // Order: Asc, Mode: max searchResponse = client() .prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location", - 40.7143528,
-74.0059731).order(SortOrder.ASC).sortMode("max").setNestedPath("branches")) + 40.7143528, -74.0059731).order(SortOrder.ASC).sortMode(SortMode.MAX).setNestedPath("branches")) .execute().actionGet(); assertHitCount(searchResponse, 4); @@ -480,7 +480,7 @@ public class GeoDistanceIT extends ESIntegTestCase { // Order: Desc, Mode: min searchResponse = client() .prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location", - 40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min").setNestedPath("branches")) + 40.7143528, -74.0059731).order(SortOrder.DESC).sortMode(SortMode.MIN).setNestedPath("branches")) .execute().actionGet(); assertHitCount(searchResponse, 4); @@ -492,7 +492,7 @@ public class GeoDistanceIT extends ESIntegTestCase { searchResponse = client() .prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location", - 40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC).setNestedPath("branches")) + 40.7143528, -74.0059731).sortMode(SortMode.AVG).order(SortOrder.ASC).setNestedPath("branches")) .execute().actionGet(); assertHitCount(searchResponse, 4); @@ -504,7 +504,7 @@ public class GeoDistanceIT extends ESIntegTestCase { searchResponse = client().prepareSearch("companies") .setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) - .setNestedPath("branches").sortMode("avg").order(SortOrder.DESC).setNestedPath("branches")) + .setNestedPath("branches").sortMode(SortMode.AVG).order(SortOrder.DESC).setNestedPath("branches")) .execute().actionGet(); assertHitCount(searchResponse, 4); @@ -517,7 +517,7 @@ public class GeoDistanceIT extends ESIntegTestCase { searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery()) .addSort(SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731) .setNestedFilter(termQuery("branches.name", "brooklyn")) - .sortMode("avg").order(SortOrder.ASC).setNestedPath("branches")) + .sortMode(SortMode.AVG).order(SortOrder.ASC).setNestedPath("branches")) .execute().actionGet(); assertHitCount(searchResponse, 4); assertFirstHit(searchResponse, hasId("4")); @@ -529,7 +529,7 @@ public class GeoDistanceIT extends ESIntegTestCase { try { client().prepareSearch("companies").setQuery(matchAllQuery()) - .addSort(SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731).sortMode("sum") + .addSort(SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731).sortMode(SortMode.SUM) .setNestedPath("branches")); fail("Sum should not be allowed as sort mode"); } catch (IllegalArgumentException e) { @@ -567,11 +567,11 @@ public class GeoDistanceIT extends ESIntegTestCase { assertHitCount(result, 1); } - private double randomLon() { + private static double randomLon() { return randomDouble() * 360 - 180; } - private double randomLat() { + private static double randomLat() { return randomDouble() * 180 - 90; } @@ -619,7 +619,7 @@ public class GeoDistanceIT extends ESIntegTestCase { } } - private long assertDuelOptimization(SearchResponse resp) { + private static long assertDuelOptimization(SearchResponse resp) { long matches = -1; assertSearchResponse(resp); if (matches < 0) { diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index dc06c43cb85..e7f9b167999 100644 --- 
a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -78,8 +78,8 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { GeoPoint[] d2Points = {new GeoPoint(5, 1), new GeoPoint(6, 2)}; createShuffeldJSONArray(d2Builder, d2Points); - logger.info(d1Builder.string()); - logger.info(d2Builder.string()); + logger.info("d1: {}", d1Builder); + logger.info("d2: {}", d2Builder); indexRandom(true, client().prepareIndex("index", "type", "d1").setSource(d1Builder), client().prepareIndex("index", "type", "d2").setSource(d2Builder)); @@ -95,7 +95,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder("location", q).sortMode("min").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) + .addSort(new GeoDistanceSortBuilder("location", q).sortMode(SortMode.MIN).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) .execute().actionGet(); assertOrderedSearchHits(searchResponse, "d1", "d2"); assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 3, 2, DistanceUnit.KILOMETERS), 0.01d)); @@ -103,7 +103,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder("location", q).sortMode("min").order(SortOrder.DESC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) + .addSort(new GeoDistanceSortBuilder("location", q).sortMode(SortMode.MIN).order(SortOrder.DESC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) .execute().actionGet(); assertOrderedSearchHits(searchResponse, "d2", "d1"); assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 1, 5, 1, DistanceUnit.KILOMETERS), 0.01d)); @@ -111,7 +111,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder("location", q).sortMode("max").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) + .addSort(new GeoDistanceSortBuilder("location", q).sortMode(SortMode.MAX).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) .execute().actionGet(); assertOrderedSearchHits(searchResponse, "d1", "d2"); assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 4, 1, DistanceUnit.KILOMETERS), 0.01d)); @@ -119,7 +119,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addSort(new GeoDistanceSortBuilder("location", q).sortMode("max").order(SortOrder.DESC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) + .addSort(new GeoDistanceSortBuilder("location", q).sortMode(SortMode.MAX).order(SortOrder.DESC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) .execute().actionGet(); assertOrderedSearchHits(searchResponse, "d2", "d1"); assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 1, 6, 2, DistanceUnit.KILOMETERS), 0.01d)); @@ -194,7 +194,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { 
SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode("min").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) + .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) .execute().actionGet(); assertOrderedSearchHits(searchResponse, "d1", "d2"); assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2.5, 1, 2, 1, DistanceUnit.KILOMETERS), 1.e-4)); @@ -202,7 +202,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode("max").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) + .addSort(geoDistanceSortBuilder.sortMode(SortMode.MAX).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) .execute().actionGet(); assertOrderedSearchHits(searchResponse, "d1", "d2"); assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(3.25, 4, 2, 1, DistanceUnit.KILOMETERS), 1.e-4)); @@ -223,7 +223,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode("min").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) + .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) .execute().actionGet(); checkCorrectSortOrderForGeoSort(searchResponse); @@ -231,7 +231,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode("min").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) + .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) .execute().actionGet(); checkCorrectSortOrderForGeoSort(searchResponse); @@ -239,7 +239,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) - .addSort(geoDistanceSortBuilder.sortMode("min").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) + .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS)) .execute().actionGet(); checkCorrectSortOrderForGeoSort(searchResponse); @@ -263,9 +263,17 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort("location", 2.0, 2.0) .unit(DistanceUnit.KILOMETERS).geoDistance(GeoDistance.PLANE))).execute().actionGet(); checkCorrectSortOrderForGeoSort(searchResponse); + + searchResponse = client() + .prepareSearch() + .setSource( + new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort("location", 2.0, 2.0) + .unit(DistanceUnit.KILOMETERS).geoDistance(GeoDistance.PLANE) + .ignoreMalformed(true).coerce(true))).execute().actionGet(); + checkCorrectSortOrderForGeoSort(searchResponse); } - private void checkCorrectSortOrderForGeoSort(SearchResponse searchResponse) { + private static void checkCorrectSortOrderForGeoSort(SearchResponse 
searchResponse) { assertOrderedSearchHits(searchResponse, "d2", "d1"); assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 1, 2, DistanceUnit.KILOMETERS), 1.e-4)); assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 1, 1, DistanceUnit.KILOMETERS), 1.e-4)); diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java index e957db58b38..50e4aeeb71b 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.geo.RandomGeoGenerator; import java.io.IOException; @@ -60,7 +59,7 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase<GeoDistanceSortBuilder> { - @Override - protected Class<GeoDistanceSortBuilder> getPrototype() { - return (Class<GeoDistanceSortBuilder>) GeoDistanceSortBuilder.PROTOTYPE.getClass(); - } - public void testSortModeSumIsRejectedInSetter() { GeoDistanceSortBuilder builder = new GeoDistanceSortBuilder("testname", -1, -1); GeoPoint point = RandomGeoGenerator.randomPoint(getRandom()); builder.point(point.getLat(), point.getLon()); try { - builder.sortMode("SUM"); + builder.sortMode(SortMode.SUM); fail("sort mode sum should not be supported"); } catch (IllegalArgumentException e) { // all good } } - + public void testSortModeSumIsRejectedInJSON() throws IOException { - String json = "{\n" + - " \"testname\" : [ {\n" + - " \"lat\" : -6.046997540714173,\n" + - " \"lon\" : -51.94128329747579\n" + - " } ],\n" + - " \"unit\" : \"m\",\n" + - " \"distance_type\" : \"sloppy_arc\",\n" + - " \"reverse\" : true,\n" + - " \"mode\" : \"SUM\",\n" + - " \"coerce\" : false,\n" + - " \"ignore_malformed\" : false\n" + + String json = "{\n" + + " \"testname\" : [ {\n" + + " \"lat\" : -6.046997540714173,\n" + + " \"lon\" : -51.94128329747579\n" + + " } ],\n" + + " \"unit\" : \"m\",\n" + + " \"distance_type\" : \"sloppy_arc\",\n" + + " \"reverse\" : true,\n" + + " \"mode\" : \"SUM\",\n" + + " \"coerce\" : false,\n" + + " \"ignore_malformed\" : false\n" + "}"; XContentParser itemParser = XContentHelper.createParser(new BytesArray(json)); itemParser.nextToken(); - + QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); context.reset(itemParser); @@ -216,28 +208,28 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase<GeoDistanceSortBuilder> diff --git a/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java new file mode 100644 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java +public class ScoreSortBuilderTests extends AbstractSortTestCase<ScoreSortBuilder> { + + @Override + protected ScoreSortBuilder createTestItem() { + return new ScoreSortBuilder().order(randomBoolean() ?
SortOrder.ASC : SortOrder.DESC); + } + + @Override + protected ScoreSortBuilder mutate(ScoreSortBuilder original) throws IOException { + ScoreSortBuilder result = new ScoreSortBuilder(); + if (original.order() == SortOrder.ASC) { + result.order(SortOrder.DESC); + } else { + result.order(SortOrder.ASC); + } + return result; + } + + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + /** + * test passing null to {@link ScoreSortBuilder#order(SortOrder)} is illegal + */ + public void testIllegalOrder() { + exceptionRule.expect(NullPointerException.class); + exceptionRule.expectMessage("sort order cannot be null."); + new ScoreSortBuilder().order(null); + } + + /** + * test parsing order parameter if specified as `order` field in the json + * instead of the `reverse` field that we render in toXContent + */ + public void testParseOrder() throws IOException { + QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); + context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); + SortOrder order = randomBoolean() ? SortOrder.ASC : SortOrder.DESC; + String scoreSortString = "{ \"_score\": { \"order\": \""+ order.toString() +"\" }}"; + XContentParser parser = XContentFactory.xContent(scoreSortString).createParser(scoreSortString); + // need to skip until parser is located on second START_OBJECT + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + + context.reset(parser); + ScoreSortBuilder scoreSort = ScoreSortBuilder.PROTOTYPE.fromXContent(context, "_score"); + assertEquals(order, scoreSort.order()); + } +} diff --git a/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java new file mode 100644 index 00000000000..091a6c3002a --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java @@ -0,0 +1,241 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.sort; + + +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; +import org.junit.Rule; +import org.junit.rules.ExpectedException; + +import java.io.IOException; + +public class ScriptSortBuilderTests extends AbstractSortTestCase<ScriptSortBuilder> { + + @Override + protected ScriptSortBuilder createTestItem() { + ScriptSortBuilder builder = new ScriptSortBuilder(new Script(randomAsciiOfLengthBetween(5, 10)), + randomBoolean() ? ScriptSortType.NUMBER : ScriptSortType.STRING); + if (randomBoolean()) { + builder.order(RandomSortDataGenerator.order(builder.order())); + } + if (randomBoolean()) { + builder.sortMode(RandomSortDataGenerator.mode(builder.sortMode())); + } + if (randomBoolean()) { + builder.setNestedFilter(RandomSortDataGenerator.nestedFilter(builder.getNestedFilter())); + } + if (randomBoolean()) { + builder.setNestedPath(RandomSortDataGenerator.randomAscii(builder.getNestedPath())); + } + return builder; + } + + @Override + protected ScriptSortBuilder mutate(ScriptSortBuilder original) throws IOException { + ScriptSortBuilder result; + if (randomBoolean()) { + // change one of the constructor args, copy the rest over + Script script = original.script(); + ScriptSortType type = original.type(); + if (randomBoolean()) { + result = new ScriptSortBuilder(new Script(script.getScript() + "_suffix"), type); + } else { + result = new ScriptSortBuilder(script, type.equals(ScriptSortType.NUMBER) ? ScriptSortType.STRING : ScriptSortType.NUMBER); + } + result.order(original.order()); + if (original.sortMode() != null) { + result.sortMode(original.sortMode()); + } + result.setNestedFilter(original.getNestedFilter()); + result.setNestedPath(original.getNestedPath()); + return result; + } + result = new ScriptSortBuilder(original); + switch (randomIntBetween(0, 3)) { + case 0: + if (original.order() == SortOrder.ASC) { + result.order(SortOrder.DESC); + } else { + result.order(SortOrder.ASC); + } + break; + case 1: + result.sortMode(RandomSortDataGenerator.mode(original.sortMode())); + break; + case 2: + result.setNestedFilter(RandomSortDataGenerator.nestedFilter(original.getNestedFilter())); + break; + case 3: + result.setNestedPath(original.getNestedPath() + "_some_suffix"); + break; + } + return result; + } + + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + public void testScriptSortType() { + // we rely on these ordinals in serialization, so changing them breaks bwc.
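+ // (Presumably the ordinal is what gets written to the stream, e.g. something like out.writeVInt(type.ordinal()), so reordering or inserting enum constants would silently change the wire value that older nodes read back.)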
+ assertEquals(0, ScriptSortType.STRING.ordinal()); + assertEquals(1, ScriptSortType.NUMBER.ordinal()); + + assertEquals("string", ScriptSortType.STRING.toString()); + assertEquals("number", ScriptSortType.NUMBER.toString()); + + assertEquals(ScriptSortType.STRING, ScriptSortType.fromString("string")); + assertEquals(ScriptSortType.STRING, ScriptSortType.fromString("String")); + assertEquals(ScriptSortType.STRING, ScriptSortType.fromString("STRING")); + assertEquals(ScriptSortType.NUMBER, ScriptSortType.fromString("number")); + assertEquals(ScriptSortType.NUMBER, ScriptSortType.fromString("Number")); + assertEquals(ScriptSortType.NUMBER, ScriptSortType.fromString("NUMBER")); + } + + public void testScriptSortTypeNull() { + exceptionRule.expect(NullPointerException.class); + exceptionRule.expectMessage("input string is null"); + ScriptSortType.fromString(null); + } + + public void testScriptSortTypeIllegalArgument() { + exceptionRule.expect(IllegalArgumentException.class); + exceptionRule.expectMessage("Unknown ScriptSortType [xyz]"); + ScriptSortType.fromString("xyz"); + } + + public void testParseJson() throws IOException { + QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); + context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); + String scriptSort = "{\n" + + "\"_script\" : {\n" + + "\"type\" : \"number\",\n" + + "\"script\" : {\n" + + "\"inline\": \"doc['field_name'].value * factor\",\n" + + "\"params\" : {\n" + + "\"factor\" : 1.1\n" + + "}\n" + + "},\n" + + "\"mode\" : \"max\",\n" + + "\"order\" : \"asc\"\n" + + "} }\n"; + XContentParser parser = XContentFactory.xContent(scriptSort).createParser(scriptSort); + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + + context.reset(parser); + ScriptSortBuilder builder = ScriptSortBuilder.PROTOTYPE.fromXContent(context, null); + assertEquals("doc['field_name'].value * factor", builder.script().getScript()); + assertNull(builder.script().getLang()); + assertEquals(1.1, builder.script().getParams().get("factor")); + assertEquals(ScriptType.INLINE, builder.script().getType()); + assertEquals(ScriptSortType.NUMBER, builder.type()); + assertEquals(SortOrder.ASC, builder.order()); + assertEquals(SortMode.MAX, builder.sortMode()); + assertNull(builder.getNestedFilter()); + assertNull(builder.getNestedPath()); + } + + public void testParseJsonOldStyle() throws IOException { + QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); + context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); + String scriptSort = "{\n" + + "\"_script\" : {\n" + + "\"type\" : \"number\",\n" + + "\"script\" : \"doc['field_name'].value * factor\",\n" + + "\"params\" : {\n" + + "\"factor\" : 1.1\n" + + "},\n" + + "\"mode\" : \"max\",\n" + + "\"order\" : \"asc\"\n" + + "} }\n"; + XContentParser parser = XContentFactory.xContent(scriptSort).createParser(scriptSort); + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + + context.reset(parser); + ScriptSortBuilder builder = ScriptSortBuilder.PROTOTYPE.fromXContent(context, null); + assertEquals("doc['field_name'].value * factor", builder.script().getScript()); + assertNull(builder.script().getLang()); + assertEquals(1.1, builder.script().getParams().get("factor")); + assertEquals(ScriptType.INLINE, builder.script().getType()); + assertEquals(ScriptSortType.NUMBER, builder.type()); + assertEquals(SortOrder.ASC, builder.order()); + assertEquals(SortMode.MAX, builder.sortMode()); + assertNull(builder.getNestedFilter()); + 
assertNull(builder.getNestedPath()); + } + + public void testParseBadFieldNameExceptions() throws IOException { + QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); + context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); + String scriptSort = "{\"_script\" : {" + "\"bad_field\" : \"number\"" + "} }"; + XContentParser parser = XContentFactory.xContent(scriptSort).createParser(scriptSort); + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + + context.reset(parser); + exceptionRule.expect(ParsingException.class); + exceptionRule.expectMessage("failed to parse field [bad_field]"); + ScriptSortBuilder.PROTOTYPE.fromXContent(context, null); + } + + public void testParseBadFieldNameExceptionsOnStartObject() throws IOException { + QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); + context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); + + String scriptSort = "{\"_script\" : {" + "\"bad_field\" : { \"order\" : \"asc\" } } }"; + XContentParser parser = XContentFactory.xContent(scriptSort).createParser(scriptSort); + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + + context.reset(parser); + exceptionRule.expect(ParsingException.class); + exceptionRule.expectMessage("failed to parse field [bad_field]"); + ScriptSortBuilder.PROTOTYPE.fromXContent(context, null); + } + + public void testParseUnexpectedToken() throws IOException { + QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); + context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); + + String scriptSort = "{\"_script\" : {" + "\"script\" : [ \"order\" : \"asc\" ] } }"; + XContentParser parser = XContentFactory.xContent(scriptSort).createParser(scriptSort); + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + + context.reset(parser); + exceptionRule.expect(ParsingException.class); + exceptionRule.expectMessage("unexpected token [START_ARRAY]"); + ScriptSortBuilder.PROTOTYPE.fromXContent(context, null); + } +} diff --git a/core/src/test/java/org/elasticsearch/search/sort/SortModeTests.java b/core/src/test/java/org/elasticsearch/search/sort/SortModeTests.java new file mode 100644 index 00000000000..29deb6dd76d --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/sort/SortModeTests.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.sort; + +import org.elasticsearch.test.ESTestCase; +import org.junit.Rule; +import org.junit.rules.ExpectedException; + +import java.util.Locale; + +public class SortModeTests extends ESTestCase { + + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + public void testSortMode() { + // we rely on these ordinals in serialization, so changing them breaks bwc. + assertEquals(0, SortMode.MIN.ordinal()); + assertEquals(1, SortMode.MAX.ordinal()); + assertEquals(2, SortMode.SUM.ordinal()); + assertEquals(3, SortMode.AVG.ordinal()); + assertEquals(4, SortMode.MEDIAN.ordinal()); + + assertEquals("min", SortMode.MIN.toString()); + assertEquals("max", SortMode.MAX.toString()); + assertEquals("sum", SortMode.SUM.toString()); + assertEquals("avg", SortMode.AVG.toString()); + assertEquals("median", SortMode.MEDIAN.toString()); + + for (SortMode mode : SortMode.values()) { + assertEquals(mode, SortMode.fromString(mode.toString())); + assertEquals(mode, SortMode.fromString(mode.toString().toUpperCase(Locale.ROOT))); + } + } + + public void testParseNull() { + exceptionRule.expect(NullPointerException.class); + exceptionRule.expectMessage("input string is null"); + SortMode.fromString(null); + } + + public void testIllegalArgument() { + exceptionRule.expect(IllegalArgumentException.class); + exceptionRule.expectMessage("Unknown SortMode [xyz]"); + SortMode.fromString("xyz"); + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java b/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java index cbd7b5468b2..0c64b7e7b15 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java @@ -50,7 +50,7 @@ public class SortParserTests extends ESSingleNodeTestCase { XContentParser parser = XContentHelper.createParser(sortBuilder.bytes()); parser.nextToken(); GeoDistanceSortParser geoParser = new GeoDistanceSortParser(); - geoParser.parse(parser, context); + geoParser.parse(parser, context.getQueryShardContext()); sortBuilder = jsonBuilder(); sortBuilder.startObject(); @@ -139,6 +139,6 @@ public class SortParserTests extends ESSingleNodeTestCase { XContentParser parser = XContentHelper.createParser(sortBuilder.bytes()); parser.nextToken(); GeoDistanceSortParser geoParser = new GeoDistanceSortParser(); - geoParser.parse(parser, context); + geoParser.parse(parser, context.getQueryShardContext()); } } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java b/core/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java new file mode 100644 index 00000000000..dc2b6081bd6 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java @@ -0,0 +1,248 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.suggest; + +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.script.ScriptContextRegistry; +import org.elasticsearch.script.ScriptEngineRegistry; +import org.elasticsearch.script.ScriptServiceTests.TestEngineService; +import org.elasticsearch.script.ScriptSettings; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; +import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder; +import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; +import org.elasticsearch.test.ESTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Collections; + +import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public abstract class AbstractSuggestionBuilderTestCase<SB extends SuggestionBuilder<SB>> extends ESTestCase { + + private static final int NUMBER_OF_TESTBUILDERS = 20; + protected static NamedWriteableRegistry namedWriteableRegistry; + protected static IndicesQueriesRegistry queriesRegistry; + protected static ParseFieldMatcher parseFieldMatcher; + protected static Suggesters suggesters; + + /** + * setup for the whole base test class + */ + @BeforeClass + public static void init() throws IOException { + Path genericConfigFolder = createTempDir(); + Settings baseSettings = settingsBuilder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(Environment.PATH_CONF_SETTING.getKey(), genericConfigFolder) + .build(); + Environment environment = new Environment(baseSettings); + ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList()); + ScriptEngineRegistry scriptEngineRegistry = new ScriptEngineRegistry(Collections.singletonList(new ScriptEngineRegistry + .ScriptEngineRegistration(TestEngineService.class, TestEngineService.TYPES))); + ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry); + suggesters = new Suggesters(Collections.emptyMap()); + + namedWriteableRegistry = new NamedWriteableRegistry(); +
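// Register a prototype for each concrete suggestion builder under SuggestionBuilder.class; serializedCopy() below relies on this registry so that a NamedWriteableAwareStreamInput can resolve the concrete class from the name on the stream. +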
namedWriteableRegistry.registerPrototype(SuggestionBuilder.class, TermSuggestionBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SuggestionBuilder.class, PhraseSuggestionBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SuggestionBuilder.class, CompletionSuggestionBuilder.PROTOTYPE); + queriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).buildQueryParserRegistry(); + parseFieldMatcher = ParseFieldMatcher.STRICT; + } + + @AfterClass + public static void afterClass() throws Exception { + namedWriteableRegistry = null; + suggesters = null; + queriesRegistry = null; + } + + /** + * Test serialization and deserialization of the suggestion builder + */ + public void testSerialization() throws IOException { + for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { + SB original = randomTestBuilder(); + SB deserialized = serializedCopy(original); + assertEquals(deserialized, original); + assertEquals(deserialized.hashCode(), original.hashCode()); + assertNotSame(deserialized, original); + } + } + + /** + * returns a random suggestion builder, setting the common options randomly + */ + protected SB randomTestBuilder() { + SB randomSuggestion = randomSuggestionBuilder(); + return randomSuggestion; + } + + public static void setCommonPropertiesOnRandomBuilder(SuggestionBuilder<?> randomSuggestion) { + randomSuggestion.text(randomAsciiOfLengthBetween(2, 20)); // have to set the text because we don't know if the global text was set + maybeSet(randomSuggestion::prefix, randomAsciiOfLengthBetween(2, 20)); + maybeSet(randomSuggestion::regex, randomAsciiOfLengthBetween(2, 20)); + maybeSet(randomSuggestion::analyzer, randomAsciiOfLengthBetween(2, 20)); + maybeSet(randomSuggestion::size, randomIntBetween(1, 20)); + maybeSet(randomSuggestion::shardSize, randomIntBetween(1, 20)); + } + + /** + * create a randomized {@link SuggestionBuilder} that is used in further tests + */ + protected abstract SB randomSuggestionBuilder(); + + /** + * Test equality and hashCode properties + */ + public void testEqualsAndHashcode() throws IOException { + for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { + SB firstBuilder = randomTestBuilder(); + assertFalse("suggestion builder is equal to null", firstBuilder.equals(null)); + assertFalse("suggestion builder is equal to incompatible type", firstBuilder.equals("")); + assertTrue("suggestion builder is not equal to self", firstBuilder.equals(firstBuilder)); + assertThat("same suggestion builder's hashcode returns different values if called multiple times", firstBuilder.hashCode(), + equalTo(firstBuilder.hashCode())); + final SB mutate = mutate(firstBuilder); + assertThat("different suggestion builders should not be equal", mutate, not(equalTo(firstBuilder))); + + SB secondBuilder = serializedCopy(firstBuilder); + assertTrue("suggestion builder is not equal to self", secondBuilder.equals(secondBuilder)); + assertTrue("suggestion builder is not equal to its copy", firstBuilder.equals(secondBuilder)); + assertTrue("equals is not symmetric", secondBuilder.equals(firstBuilder)); + assertThat("suggestion builder copy's hashcode is different from original hashcode", secondBuilder.hashCode(), + equalTo(firstBuilder.hashCode())); + + SB thirdBuilder = serializedCopy(secondBuilder); + assertTrue("suggestion builder is not equal to self", thirdBuilder.equals(thirdBuilder)); + assertTrue("suggestion builder is not equal to its copy", secondBuilder.equals(thirdBuilder)); + assertThat("suggestion builder copy's hashcode is different from
original hashcode", secondBuilder.hashCode(), + equalTo(thirdBuilder.hashCode())); + assertTrue("equals is not transitive", firstBuilder.equals(thirdBuilder)); + assertThat("suggestion builder copy's hashcode is different from original hashcode", firstBuilder.hashCode(), + equalTo(thirdBuilder.hashCode())); + assertTrue("equals is not symmetric", thirdBuilder.equals(secondBuilder)); + assertTrue("equals is not symmetric", thirdBuilder.equals(firstBuilder)); + } + } + + /** + * creates random suggestion builder, renders it to xContent and back to new + * instance that should be equal to original + */ + public void testFromXContent() throws IOException { + QueryParseContext context = new QueryParseContext(null); + context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); + for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { + SB suggestionBuilder = randomTestBuilder(); + XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + if (randomBoolean()) { + xContentBuilder.prettyPrint(); + } + xContentBuilder.startObject(); + suggestionBuilder.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); + xContentBuilder.endObject(); + + XContentParser parser = XContentHelper.createParser(xContentBuilder.bytes()); + context.reset(parser); + // we need to skip the start object and the name, those will be parsed by outer SuggestBuilder + parser.nextToken(); + + SuggestionBuilder secondSuggestionBuilder = SuggestionBuilder.fromXContent(context, suggesters); + assertNotSame(suggestionBuilder, secondSuggestionBuilder); + assertEquals(suggestionBuilder, secondSuggestionBuilder); + assertEquals(suggestionBuilder.hashCode(), secondSuggestionBuilder.hashCode()); + } + } + + private SB mutate(SB firstBuilder) throws IOException { + SB mutation = serializedCopy(firstBuilder); + assertNotSame(mutation, firstBuilder); + // change ither one of the shared SuggestionBuilder parameters, or delegate to the specific tests mutate method + if (randomBoolean()) { + switch (randomIntBetween(0, 5)) { + case 0: + mutation.text(randomValueOtherThan(mutation.text(), () -> randomAsciiOfLengthBetween(2, 20))); + break; + case 1: + mutation.prefix(randomValueOtherThan(mutation.prefix(), () -> randomAsciiOfLengthBetween(2, 20))); + break; + case 2: + mutation.regex(randomValueOtherThan(mutation.regex(), () -> randomAsciiOfLengthBetween(2, 20))); + break; + case 3: + mutation.analyzer(randomValueOtherThan(mutation.analyzer(), () -> randomAsciiOfLengthBetween(2, 20))); + break; + case 4: + mutation.size(randomValueOtherThan(mutation.size(), () -> randomIntBetween(1, 20))); + break; + case 5: + mutation.shardSize(randomValueOtherThan(mutation.shardSize(), () -> randomIntBetween(1, 20))); + break; + } + } else { + mutateSpecificParameters(firstBuilder); + } + return mutation; + } + + /** + * take and input {@link SuggestBuilder} and return another one that is + * different in one aspect (to test non-equality) + */ + protected abstract void mutateSpecificParameters(SB firstBuilder) throws IOException; + + @SuppressWarnings("unchecked") + protected SB serializedCopy(SB original) throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.writeSuggestion(original); + try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) { + return (SB) in.readSuggestion(); + } + } + } + + protected static QueryParseContext newParseContext(final String xcontent) throws IOException { + final 
QueryParseContext parseContext = new QueryParseContext(queriesRegistry); + parseContext.reset(XContentFactory.xContent(xcontent).createParser(xcontent)); + parseContext.parseFieldMatcher(parseFieldMatcher); + return parseContext; + } +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 04369348b6f..efc7521c05e 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -20,10 +20,10 @@ package org.elasticsearch.search.suggest; import com.carrotsearch.hppc.ObjectLongHashMap; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import org.apache.lucene.analysis.TokenStreamToAutomaton; import org.apache.lucene.search.suggest.document.ContextSuggestField; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; @@ -32,21 +32,20 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.suggest.SuggestRequest; import org.elasticsearch.action.suggest.SuggestResponse; import org.elasticsearch.client.Requests; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.percolator.PercolatorService; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.suggest.completion.CompletionStats; import org.elasticsearch.search.suggest.completion.CompletionSuggestion; import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; -import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder.FuzzyOptionsBuilder; +import org.elasticsearch.search.suggest.completion.FuzzyOptions; import org.elasticsearch.search.suggest.completion.context.CategoryContextMapping; import org.elasticsearch.search.suggest.completion.context.ContextMapping; import org.elasticsearch.search.suggest.completion.context.GeoContextMapping; @@ -55,6 +54,7 @@ import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; @@ -103,7 +103,7 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { )); } indexRandom(true, indexRequestBuilders); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); assertSuggestions("foo", prefix, "suggestion10", 
"suggestion9", "suggestion8", "suggestion7", "suggestion6"); } @@ -124,7 +124,7 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { )); } indexRandom(true, indexRequestBuilders); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).regex("sugg.*es"); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).regex("sugg.*es"); assertSuggestions("foo", prefix, "sugg10estion", "sugg9estion", "sugg8estion", "sugg7estion", "sugg6estion"); } @@ -145,7 +145,7 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { )); } indexRandom(true, indexRequestBuilders); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg", Fuzziness.ONE); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg", Fuzziness.ONE); assertSuggestions("foo", prefix, "sugxgestion10", "sugxgestion9", "sugxgestion8", "sugxgestion7", "sugxgestion6"); } @@ -171,13 +171,13 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { for (int i = 0; i < size; i++) { outputs[i] = "suggestion" + (numDocs - i); } - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sug").size(size); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sug").size(size); assertSuggestions("foo", prefix, outputs); - CompletionSuggestionBuilder regex = SuggestBuilders.completionSuggestion("foo").field(FIELD).regex("su[g|s]g").size(size); + CompletionSuggestionBuilder regex = SuggestBuilders.completionSuggestion(FIELD).regex("su[g|s]g").size(size); assertSuggestions("foo", regex, outputs); - CompletionSuggestionBuilder fuzzyPrefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg", Fuzziness.ONE).size(size); + CompletionSuggestionBuilder fuzzyPrefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg", Fuzziness.ONE).size(size); assertSuggestions("foo", fuzzyPrefix, outputs); } @@ -196,8 +196,9 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { } indexRandom(true, indexRequestBuilders); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg").size(numDocs).payload("count"); - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(prefix).execute().actionGet(); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"). 
+ size(numDocs).payload(Collections.singletonList("count")); + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", prefix).execute().actionGet(); assertNoFailures(suggestResponse); CompletionSuggestion completionSuggestion = suggestResponse.getSuggest().getSuggestion("foo"); CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0); @@ -208,32 +209,6 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { } } - public void testMalformedRequestPayload() throws Exception { - final CompletionMappingBuilder mapping = new CompletionMappingBuilder(); - createIndexAndMapping(mapping); - SuggestRequest request = new SuggestRequest(INDEX); - XContentBuilder suggest = jsonBuilder().startObject() - .startObject("bad-payload") - .field("prefix", "sug") - .startObject("completion") - .field("field", FIELD) - .startArray("payload") - .startObject() - .field("payload", "field") - .endObject() - .endArray() - .endObject() - .endObject().endObject(); - request.suggest(suggest.bytes()); - ensureGreen(); - - SuggestResponse suggestResponse = client().suggest(request).get(); - assertThat(suggestResponse.getSuccessfulShards(), equalTo(0)); - for (ShardOperationFailedException exception : suggestResponse.getShardFailures()) { - assertThat(exception.reason(), containsString("ParsingException[[completion] failed to parse field [payload]]; nested: IllegalStateException[Can't get text on a START_OBJECT")); - } - } - public void testMissingPayloadField() throws Exception { final CompletionMappingBuilder mapping = new CompletionMappingBuilder(); createIndexAndMapping(mapping); @@ -242,8 +217,9 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { client().prepareIndex(INDEX, TYPE, "2").setSource(FIELD, "suggestion") ); indexRandom(true, indexRequestBuilders); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg").payload("test_field"); - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(prefix).execute().actionGet(); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg") + .payload(Collections.singletonList("test_field")); + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", prefix).execute().actionGet(); assertNoFailures(suggestResponse); CompletionSuggestion completionSuggestion = suggestResponse.getSuggest().getSuggestion("foo"); CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0); @@ -279,8 +255,9 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { indexRequestBuilders.add(client().prepareIndex(INDEX, TYPE, "2").setSource(source)); indexRandom(true, indexRequestBuilders); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg").payload("title", "count"); - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(prefix).execute().actionGet(); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg") + .payload(Arrays.asList("title", "count")); + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", prefix).execute().actionGet(); assertNoFailures(suggestResponse); CompletionSuggestion completionSuggestion = suggestResponse.getSuggest().getSuggestion("foo"); List<CompletionSuggestion.Entry.Option> options = completionSuggestion.getEntries().get(0).getOptions(); @@ -324,13 +301,14 @@ public class CompletionSuggestSearchIT
extends ESIntegTestCase { int suggestionSize = randomIntBetween(1, numDocs); int numRequestedPayloadFields = randomIntBetween(2, numPayloadFields); - String[] payloadFields = new String[numRequestedPayloadFields]; + List<String> payloadFields = new ArrayList<>(numRequestedPayloadFields); for (int i = 0; i < numRequestedPayloadFields; i++) { - payloadFields[i] = "test_field" + i; + payloadFields.add("test_field" + i); } - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg").size(suggestionSize).payload(payloadFields); - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(prefix).execute().actionGet(); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg") + .size(suggestionSize).payload(payloadFields); + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", prefix).execute().actionGet(); assertNoFailures(suggestResponse); CompletionSuggestion completionSuggestion = suggestResponse.getSuggest().getSuggestion("foo"); CompletionSuggestion.Entry options = completionSuggestion.getEntries().get(0); @@ -369,7 +347,7 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { .setSource(source).execute().actionGet(); } - client().prepareIndex(INDEX, PercolatorService.TYPE_NAME, "4") + client().prepareIndex(INDEX, PercolatorFieldMapper.TYPE_NAME, "4") .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()) .execute().actionGet(); @@ -428,8 +406,8 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { refresh(); - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion( - new CompletionSuggestionBuilder("testSuggestions").field(FIELD).text("test").size(10) + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion("testSuggestions", + new CompletionSuggestionBuilder(FIELD).text("test").size(10) ).execute().actionGet(); assertSuggestions(suggestResponse, "testSuggestions", "testing"); @@ -629,16 +607,16 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { .get(); assertThat(putMappingResponse.isAcknowledged(), is(true)); - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion( - SuggestBuilders.completionSuggestion("suggs").field(FIELD + ".suggest").text("f").size(10) + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion("suggs", + SuggestBuilders.completionSuggestion(FIELD + ".suggest").text("f").size(10) ).execute().actionGet(); assertSuggestions(suggestResponse, "suggs"); client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get(); ensureGreen(INDEX); - SuggestResponse afterReindexingResponse = client().prepareSuggest(INDEX).addSuggestion( - SuggestBuilders.completionSuggestion("suggs").field(FIELD + ".suggest").text("f").size(10) + SuggestResponse afterReindexingResponse = client().prepareSuggest(INDEX).addSuggestion("suggs", + SuggestBuilders.completionSuggestion(FIELD + ".suggest").text("f").size(10) ).execute().actionGet(); assertSuggestions(afterReindexingResponse, "suggs", "Foo Fighters"); } @@ -654,13 +632,13 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { refresh(); - SuggestResponse suggestResponse =
client().prepareSuggest(INDEX).addSuggestion("foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Nirv").size(10) ).execute().actionGet(); assertSuggestions(suggestResponse, false, "foo", "Nirvana"); - suggestResponse = client().prepareSuggest(INDEX).addSuggestion( - SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("Nirw", Fuzziness.ONE).size(10) + suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Nirw", Fuzziness.ONE).size(10) ).execute().actionGet(); assertSuggestions(suggestResponse, false, "foo", "Nirvana"); } @@ -677,14 +655,14 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { refresh(); // edit distance 1 - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion( - SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("Norw", Fuzziness.ONE).size(10) + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Norw", Fuzziness.ONE).size(10) ).execute().actionGet(); assertSuggestions(suggestResponse, false, "foo"); // edit distance 2 - suggestResponse = client().prepareSuggest(INDEX).addSuggestion( - SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("Norw", Fuzziness.TWO).size(10) + suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Norw", Fuzziness.TWO).size(10) ).execute().actionGet(); assertSuggestions(suggestResponse, false, "foo", "Nirvana"); } @@ -700,13 +678,13 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { refresh(); - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion( - SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("Nriv", new FuzzyOptionsBuilder().setTranspositions(false)).size(10) + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Nriv", FuzzyOptions.builder().setTranspositions(false).build()).size(10) ).execute().actionGet(); assertSuggestions(suggestResponse, false, "foo"); - suggestResponse = client().prepareSuggest(INDEX).addSuggestion( - SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("Nriv", Fuzziness.ONE).size(10) + suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Nriv", Fuzziness.ONE).size(10) ).execute().actionGet(); assertSuggestions(suggestResponse, false, "foo", "Nirvana"); } @@ -722,13 +700,13 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { refresh(); - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion( - SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("Nriva", new FuzzyOptionsBuilder().setFuzzyMinLength(6)).size(10) + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Nriva", FuzzyOptions.builder().setFuzzyMinLength(6).build()).size(10) ).execute().actionGet(); assertSuggestions(suggestResponse, false, "foo"); - suggestResponse = client().prepareSuggest(INDEX).addSuggestion( - SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("Nrivan", new FuzzyOptionsBuilder().setFuzzyMinLength(6)).size(10) + suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Nrivan", 
FuzzyOptions.builder().setFuzzyMinLength(6).build()).size(10) ).execute().actionGet(); assertSuggestions(suggestResponse, false, "foo", "Nirvana"); } @@ -744,13 +722,13 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { refresh(); - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion( - SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("Nirw", new FuzzyOptionsBuilder().setFuzzyPrefixLength(4)).size(10) + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Nirw", FuzzyOptions.builder().setFuzzyPrefixLength(4).build()).size(10) ).execute().actionGet(); assertSuggestions(suggestResponse, false, "foo"); - suggestResponse = client().prepareSuggest(INDEX).addSuggestion( - SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("Nirvo", new FuzzyOptionsBuilder().setFuzzyPrefixLength(4)).size(10) + suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", + SuggestBuilders.completionSuggestion(FIELD).prefix("Nirvo", FuzzyOptions.builder().setFuzzyPrefixLength(4).build()).size(10) ).execute().actionGet(); assertSuggestions(suggestResponse, false, "foo", "Nirvana"); } @@ -768,19 +746,19 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { // suggestion with a character, which needs unicode awareness org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder completionSuggestionBuilder = - SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("öööи", new FuzzyOptionsBuilder().setUnicodeAware(true)).size(10); + SuggestBuilders.completionSuggestion(FIELD).prefix("öööи", FuzzyOptions.builder().setUnicodeAware(true).build()).size(10); - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(completionSuggestionBuilder).execute().actionGet(); + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", completionSuggestionBuilder).execute().actionGet(); assertSuggestions(suggestResponse, false, "foo", "ööööö"); // removing unicode awareness leads to no result - completionSuggestionBuilder = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("öööи", new FuzzyOptionsBuilder().setUnicodeAware(false)).size(10); - suggestResponse = client().prepareSuggest(INDEX).addSuggestion(completionSuggestionBuilder).execute().actionGet(); + completionSuggestionBuilder = SuggestBuilders.completionSuggestion(FIELD).prefix("öööи", FuzzyOptions.builder().setUnicodeAware(false).build()).size(10); + suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", completionSuggestionBuilder).execute().actionGet(); assertSuggestions(suggestResponse, false, "foo"); // increasing edit distance instead of unicode awareness works again, as this is only a single character - completionSuggestionBuilder = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("öööи", new FuzzyOptionsBuilder().setUnicodeAware(false).setFuzziness(Fuzziness.TWO)).size(10); - suggestResponse = client().prepareSuggest(INDEX).addSuggestion(completionSuggestionBuilder).execute().actionGet(); + completionSuggestionBuilder = SuggestBuilders.completionSuggestion(FIELD).prefix("öööи", FuzzyOptions.builder().setUnicodeAware(false).setFuzziness(Fuzziness.TWO).build()).size(10); + suggestResponse = client().prepareSuggest(INDEX).addSuggestion("foo", completionSuggestionBuilder).execute().actionGet(); assertSuggestions(suggestResponse, false, "foo", "ööööö"); } @@ -809,8 +787,8 @@ public 
class CompletionSuggestSearchIT extends ESIntegTestCase { refresh(); ensureGreen(); // load the fst index into ram - client().prepareSuggest(INDEX).addSuggestion(SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("f")).get(); - client().prepareSuggest(INDEX).addSuggestion(SuggestBuilders.completionSuggestion("foo").field(otherField).prefix("f")).get(); + client().prepareSuggest(INDEX).addSuggestion("foo", SuggestBuilders.completionSuggestion(FIELD).prefix("f")).get(); + client().prepareSuggest(INDEX).addSuggestion("foo", SuggestBuilders.completionSuggestion(otherField).prefix("f")).get(); // Get all stats IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).get(); @@ -907,22 +885,22 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { } - public void assertSuggestions(String suggestionName, SuggestBuilder.SuggestionBuilder suggestBuilder, String... suggestions) { - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(suggestBuilder + public void assertSuggestions(String suggestionName, SuggestionBuilder<?> suggestBuilder, String... suggestions) { + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(suggestionName, suggestBuilder ).execute().actionGet(); assertSuggestions(suggestResponse, suggestionName, suggestions); } public void assertSuggestions(String suggestion, String... suggestions) { String suggestionName = RandomStrings.randomAsciiOfLength(random(), 10); - CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestion).size(10); + CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion(FIELD).text(suggestion).size(10); assertSuggestions(suggestionName, suggestionBuilder, suggestions); } public void assertSuggestionsNotInOrder(String suggestString, String...
suggestions) { String suggestionName = RandomStrings.randomAsciiOfLength(random(), 10); - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion( - SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestString).size(10) + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(suggestionName, + SuggestBuilders.completionSuggestion(FIELD).text(suggestString).size(10) ).execute().actionGet(); assertSuggestions(suggestResponse, false, suggestionName, suggestions); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java index 7096a0f34ac..58458c9d244 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.suggest; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import org.apache.lucene.spatial.util.GeoHashUtils; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -35,12 +36,14 @@ import org.elasticsearch.search.suggest.completion.context.ContextBuilder; import org.elasticsearch.search.suggest.completion.context.ContextMapping; import org.elasticsearch.search.suggest.completion.context.GeoContextMapping; import org.elasticsearch.search.suggest.completion.context.GeoQueryContext; +import org.elasticsearch.search.suggest.completion.context.QueryContext; import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; @@ -89,7 +92,7 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { } indexRandom(true, indexRequestBuilders); ensureYellow(INDEX); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); assertSuggestions("foo", prefix, "suggestion9", "suggestion8", "suggestion7", "suggestion6", "suggestion5"); } @@ -121,7 +124,7 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { } indexRandom(true, indexRequestBuilders); ensureYellow(INDEX); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).regex("sugg.*es"); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).regex("sugg.*es"); assertSuggestions("foo", prefix, "sugg9estion", "sugg8estion", "sugg7estion", "sugg6estion", "sugg5estion"); } @@ -153,7 +156,7 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { } indexRandom(true, indexRequestBuilders); ensureYellow(INDEX); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg", Fuzziness.ONE); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg", Fuzziness.ONE); assertSuggestions("foo", prefix, "sugxgestion9", "sugxgestion8", "sugxgestion7", "sugxgestion6", "sugxgestion5"); } @@ -178,8 +181,8 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { } indexRandom(true, indexRequestBuilders); ensureYellow(INDEX); 
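The hunks above all apply the same two-part API change: the suggestion name moves off the builder and into `addSuggestion(name, builder)`, and `SuggestBuilders.completionSuggestion(...)` now takes the field name directly instead of a separate `.field(...)` call. A minimal before/after sketch of the new shape, using the tests' `INDEX` and `FIELD` constants as placeholders:

```java
// Old style (removed): the builder carried the suggestion name, the field was set separately.
// client().prepareSuggest(INDEX).addSuggestion(
//         SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg")).get();

// New style: construct the builder from the field; name the suggestion when registering it.
CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD)
        .prefix("sugg")
        .size(5);
SuggestResponse suggestResponse = client().prepareSuggest(INDEX)
        .addSuggestion("foo", prefix)
        .execute().actionGet();
```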
- CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg") - .categoryContexts("cat", CategoryQueryContext.builder().setCategory("cat0").build()); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg") + .contexts(Collections.singletonMap("cat", Collections.singletonList(CategoryQueryContext.builder().setCategory("cat0").build()))); assertSuggestions("foo", prefix, "suggestion8", "suggestion6", "suggestion4", "suggestion2", "suggestion0"); } @@ -205,10 +208,10 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { } indexRandom(true, indexRequestBuilders); ensureYellow(INDEX); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg") - .categoryContexts("cat", - CategoryQueryContext.builder().setCategory("cat0").setBoost(3).build(), - CategoryQueryContext.builder().setCategory("cat1").build() + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg") + .contexts(Collections.singletonMap("cat", + Arrays.asList(CategoryQueryContext.builder().setCategory("cat0").setBoost(3).build(), + CategoryQueryContext.builder().setCategory("cat1").build())) ); assertSuggestions("foo", prefix, "suggestion8", "suggestion6", "suggestion4", "suggestion9", "suggestion2"); } @@ -235,7 +238,7 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { } indexRandom(true, indexRequestBuilders); ensureYellow(INDEX); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); assertSuggestions("foo", prefix, "suggestion9", "suggestion8", "suggestion7", "suggestion6", "suggestion5"); } @@ -265,25 +268,22 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { ensureYellow(INDEX); // filter only on context cat - CompletionSuggestionBuilder catFilterSuggest = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); - catFilterSuggest.categoryContexts("cat", CategoryQueryContext.builder().setCategory("cat0").build()); + CompletionSuggestionBuilder catFilterSuggest = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); + catFilterSuggest.contexts(Collections.singletonMap("cat", Collections.singletonList(CategoryQueryContext.builder().setCategory("cat0").build()))); assertSuggestions("foo", catFilterSuggest, "suggestion8", "suggestion6", "suggestion4", "suggestion2", "suggestion0"); // filter only on context type - CompletionSuggestionBuilder typeFilterSuggest = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); - typeFilterSuggest.categoryContexts("type", CategoryQueryContext.builder().setCategory("type2").build(), - CategoryQueryContext.builder().setCategory("type1").build()); + CompletionSuggestionBuilder typeFilterSuggest = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); + typeFilterSuggest.contexts(Collections.singletonMap("type", Arrays.asList(CategoryQueryContext.builder().setCategory("type2").build(), + CategoryQueryContext.builder().setCategory("type1").build()))); assertSuggestions("foo", typeFilterSuggest, "suggestion9", "suggestion6", "suggestion5", "suggestion2", "suggestion1"); - CompletionSuggestionBuilder multiContextFilterSuggest = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); + CompletionSuggestionBuilder multiContextFilterSuggest = 
SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); // query context order should never matter - if (randomBoolean()) { - multiContextFilterSuggest.categoryContexts("type", CategoryQueryContext.builder().setCategory("type2").build()); - multiContextFilterSuggest.categoryContexts("cat", CategoryQueryContext.builder().setCategory("cat2").build()); - } else { - multiContextFilterSuggest.categoryContexts("cat", CategoryQueryContext.builder().setCategory("cat2").build()); - multiContextFilterSuggest.categoryContexts("type", CategoryQueryContext.builder().setCategory("type2").build()); - } + Map<String, List<? extends QueryContext>> contextMap = new HashMap<>(); + contextMap.put("type", Collections.singletonList(CategoryQueryContext.builder().setCategory("type2").build())); + contextMap.put("cat", Collections.singletonList(CategoryQueryContext.builder().setCategory("cat2").build())); + multiContextFilterSuggest.contexts(contextMap); assertSuggestions("foo", multiContextFilterSuggest, "suggestion6", "suggestion2"); } @@ -313,37 +313,34 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { ensureYellow(INDEX); // boost only on context cat - CompletionSuggestionBuilder catBoostSuggest = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); - catBoostSuggest.categoryContexts("cat", - CategoryQueryContext.builder().setCategory("cat0").setBoost(3).build(), - CategoryQueryContext.builder().setCategory("cat1").build()); + CompletionSuggestionBuilder catBoostSuggest = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); + catBoostSuggest.contexts(Collections.singletonMap("cat", + Arrays.asList( + CategoryQueryContext.builder().setCategory("cat0").setBoost(3).build(), + CategoryQueryContext.builder().setCategory("cat1").build()))); assertSuggestions("foo", catBoostSuggest, "suggestion8", "suggestion6", "suggestion4", "suggestion9", "suggestion2"); // boost only on context type - CompletionSuggestionBuilder typeBoostSuggest = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); - typeBoostSuggest.categoryContexts("type", - CategoryQueryContext.builder().setCategory("type2").setBoost(2).build(), - CategoryQueryContext.builder().setCategory("type1").setBoost(4).build()); + CompletionSuggestionBuilder typeBoostSuggest = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); + typeBoostSuggest.contexts(Collections.singletonMap("type", + Arrays.asList( + CategoryQueryContext.builder().setCategory("type2").setBoost(2).build(), + CategoryQueryContext.builder().setCategory("type1").setBoost(4).build()))); assertSuggestions("foo", typeBoostSuggest, "suggestion9", "suggestion5", "suggestion6", "suggestion1", "suggestion2"); // boost on both contexts - CompletionSuggestionBuilder multiContextBoostSuggest = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); + CompletionSuggestionBuilder multiContextBoostSuggest = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); // query context order should never matter - if (randomBoolean()) { - multiContextBoostSuggest.categoryContexts("type", - CategoryQueryContext.builder().setCategory("type2").setBoost(2).build(), - CategoryQueryContext.builder().setCategory("type1").setBoost(4).build()); - multiContextBoostSuggest.categoryContexts("cat", - CategoryQueryContext.builder().setCategory("cat0").setBoost(3).build(), - CategoryQueryContext.builder().setCategory("cat1").build()); - } else { - multiContextBoostSuggest.categoryContexts("cat", -
CategoryQueryContext.builder().setCategory("cat0").setBoost(3).build(), - CategoryQueryContext.builder().setCategory("cat1").build()); - multiContextBoostSuggest.categoryContexts("type", - CategoryQueryContext.builder().setCategory("type2").setBoost(2).build(), - CategoryQueryContext.builder().setCategory("type1").setBoost(4).build()); - } + Map<String, List<? extends QueryContext>> contextMap = new HashMap<>(); + contextMap.put("type", Arrays.asList( + CategoryQueryContext.builder().setCategory("type2").setBoost(2).build(), + CategoryQueryContext.builder().setCategory("type1").setBoost(4).build()) + ); + contextMap.put("cat", Arrays.asList( + CategoryQueryContext.builder().setCategory("cat0").setBoost(3).build(), + CategoryQueryContext.builder().setCategory("cat1").build()) + ); + multiContextBoostSuggest.contexts(contextMap); assertSuggestions("foo", multiContextBoostSuggest, "suggestion9", "suggestion6", "suggestion5", "suggestion2", "suggestion1"); } @@ -374,7 +371,7 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { } indexRandom(true, indexRequestBuilders); ensureYellow(INDEX); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); assertSuggestions("foo", prefix, "suggestion9", "suggestion8", "suggestion7", "suggestion6", "suggestion5"); } @@ -405,7 +402,7 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { indexRandom(true, indexRequestBuilders); ensureYellow(INDEX); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); assertSuggestions("foo", prefix, "suggestion0", "suggestion1", "suggestion2", "suggestion3", "suggestion4"); } @@ -431,7 +428,7 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { } indexRandom(true, indexRequestBuilders); ensureYellow(INDEX); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); assertSuggestions("foo", prefix, "suggestion9", "suggestion8", "suggestion7", "suggestion6", "suggestion5"); } @@ -458,11 +455,12 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { } indexRandom(true, indexRequestBuilders); ensureYellow(INDEX); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); assertSuggestions("foo", prefix, "suggestion9", "suggestion8", "suggestion7", "suggestion6", "suggestion5"); - CompletionSuggestionBuilder geoFilteringPrefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg") - .geoContexts("geo", GeoQueryContext.builder().setGeoPoint(new GeoPoint(geoPoints[0])).build()); + CompletionSuggestionBuilder geoFilteringPrefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg") + .contexts(Collections.singletonMap("geo", Collections.singletonList( + GeoQueryContext.builder().setGeoPoint(new GeoPoint(geoPoints[0])).build()))); assertSuggestions("foo", geoFilteringPrefix, "suggestion8", "suggestion6", "suggestion4", "suggestion2", "suggestion0"); } @@ -490,13 +488,13 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { }
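With the `categoryContexts(...)` and `geoContexts(...)` convenience methods gone, every context now goes through the single `contexts(Map)` setter, which is also why the order-randomizing `if (randomBoolean())` blocks above could be dropped: a map makes insertion order irrelevant by construction. A short sketch of the new call shape, where `suggest` stands for any `CompletionSuggestionBuilder`; the `Map<String, List<? extends QueryContext>>` value type is reconstructed here from the added `QueryContext` import, since the generics were stripped in extraction:

```java
Map<String, List<? extends QueryContext>> contextMap = new HashMap<>();
contextMap.put("cat", Arrays.asList(
        CategoryQueryContext.builder().setCategory("cat0").setBoost(3).build(),
        CategoryQueryContext.builder().setCategory("cat1").build()));
contextMap.put("geo", Collections.singletonList(
        GeoQueryContext.builder().setGeoPoint(new GeoPoint(52.2263, 4.543)).build()));
suggest.contexts(contextMap); // one call covers category and geo contexts alike
```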
indexRandom(true, indexRequestBuilders); ensureYellow(INDEX); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); assertSuggestions("foo", prefix, "suggestion9", "suggestion8", "suggestion7", "suggestion6", "suggestion5"); GeoQueryContext context1 = GeoQueryContext.builder().setGeoPoint(geoPoints[0]).setBoost(2).build(); GeoQueryContext context2 = GeoQueryContext.builder().setGeoPoint(geoPoints[1]).build(); - CompletionSuggestionBuilder geoBoostingPrefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg") - .geoContexts("geo", context1, context2); + CompletionSuggestionBuilder geoBoostingPrefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg") + .contexts(Collections.singletonMap("geo", Arrays.asList(context1, context2))); assertSuggestions("foo", geoBoostingPrefix, "suggestion8", "suggestion6", "suggestion4", "suggestion9", "suggestion7"); } @@ -526,8 +524,8 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { } indexRandom(true, indexRequestBuilders); ensureYellow(INDEX); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg") - .geoContexts("geo", GeoQueryContext.builder().setGeoPoint(new GeoPoint(52.2263, 4.543)).build()); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg") + .contexts(Collections.singletonMap("geo", Collections.singletonList(GeoQueryContext.builder().setGeoPoint(new GeoPoint(52.2263, 4.543)).build()))); assertSuggestions("foo", prefix, "suggestion9", "suggestion8", "suggestion7", "suggestion6", "suggestion5"); } @@ -564,11 +562,11 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { } indexRandom(true, indexRequestBuilders); ensureYellow(INDEX); - CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg"); + CompletionSuggestionBuilder prefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg"); assertSuggestions("foo", prefix, "suggestion9", "suggestion8", "suggestion7", "suggestion6", "suggestion5"); - CompletionSuggestionBuilder geoNeighbourPrefix = SuggestBuilders.completionSuggestion("foo").field(FIELD).prefix("sugg") - .geoContexts("geo", GeoQueryContext.builder().setGeoPoint(GeoPoint.fromGeohash(geohash)).build()); + CompletionSuggestionBuilder geoNeighbourPrefix = SuggestBuilders.completionSuggestion(FIELD).prefix("sugg") + .contexts(Collections.singletonMap("geo", Collections.singletonList(GeoQueryContext.builder().setGeoPoint(GeoPoint.fromGeohash(geohash)).build()))); assertSuggestions("foo", geoNeighbourPrefix, "suggestion9", "suggestion8", "suggestion7", "suggestion6", "suggestion5"); } @@ -624,16 +622,16 @@ public class ContextCompletionSuggestSearchIT extends ESIntegTestCase { refresh(); String suggestionName = randomAsciiOfLength(10); - CompletionSuggestionBuilder context = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text("h").size(10) - .geoContexts("st", GeoQueryContext.builder().setGeoPoint(new GeoPoint(52.52, 13.4)).build()); - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(context).get(); + CompletionSuggestionBuilder context = SuggestBuilders.completionSuggestion(FIELD).text("h").size(10) + .contexts(Collections.singletonMap("st", Collections.singletonList(GeoQueryContext.builder().setGeoPoint(new 
GeoPoint(52.52, 13.4)).build()))); + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(suggestionName, context).get(); assertEquals(suggestResponse.getSuggest().size(), 1); assertEquals("Hotel Amsterdam in Berlin", suggestResponse.getSuggest().getSuggestion(suggestionName).iterator().next().getOptions().iterator().next().getText().string()); } - public void assertSuggestions(String suggestionName, SuggestBuilder.SuggestionBuilder suggestBuilder, String... suggestions) { - SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(suggestBuilder + public void assertSuggestions(String suggestionName, SuggestionBuilder<?> suggestBuilder, String... suggestions) { + SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(suggestionName, suggestBuilder ).execute().actionGet(); CompletionSuggestSearchIT.assertSuggestions(suggestResponse, suggestionName, suggestions); } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java index 35d495272ca..150db34ff78 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.common.text.Text; +import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.Locale; @@ -31,6 +32,7 @@ import java.util.Map; */ public class CustomSuggester extends Suggester<CustomSuggester.CustomSuggestionsContext> { + public static CustomSuggester PROTOTYPE = new CustomSuggester(); // This is a pretty dumb implementation which returns the original text + fieldName + custom config option + 12 or 123 @Override @@ -52,23 +54,18 @@ public class CustomSuggester extends Suggester<CustomSuggester.CustomSuggestionsContext> { - Map<String, Object> options = parser.map(); - CustomSuggestionsContext suggestionContext = new CustomSuggestionsContext(CustomSuggester.this, options); - suggestionContext.setField((String) options.get("field")); - return suggestionContext; - }; - } - public static class CustomSuggestionsContext extends SuggestionSearchContext.SuggestionContext { public Map<String, Object> options; - public CustomSuggestionsContext(Suggester suggester, Map<String, Object> options) { - super(suggester); + public CustomSuggestionsContext(QueryShardContext context, Map<String, Object> options) { + super(new CustomSuggester(), context); this.options = options; } } + + @Override + public SuggestionBuilder<?> getBuilderPrototype() { + return CustomSuggesterSearchIT.CustomSuggestionBuilder.PROTOTYPE; + } } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java index c5e36da2ea7..19d6ed4e098 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java @@ -37,7 +37,7 @@ public class CustomSuggesterPlugin extends Plugin { } public void onModule(SearchModule searchModule) { - searchModule.registerSuggester("custom", CustomSuggester.class); + searchModule.registerSuggester("custom", CustomSuggester.PROTOTYPE); } } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchIT.java index 18b4fa50e7b..1c82a1aaf37 100644
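The custom-suggester plumbing above switches from class-based to prototype-based registration: `SearchModule.registerSuggester` now takes an instance, and each `Suggester` exposes the `SuggestionBuilder` prototype it pairs with via `getBuilderPrototype()`. Roughly, a custom suggester now wires up like this (a sketch assembled from the hunks above, not a complete plugin; the class name is illustrative):

```java
public class MyCustomSuggesterPlugin extends Plugin {
    public void onModule(SearchModule searchModule) {
        // Register a prototype instance rather than CustomSuggester.class.
        searchModule.registerSuggester("custom", CustomSuggester.PROTOTYPE);
    }
}
```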
--- a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchIT.java @@ -20,17 +20,30 @@ package org.elasticsearch.search.suggest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import java.io.IOException; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Locale; +import java.util.Map; +import java.util.Objects; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.hasSize; @@ -59,16 +72,7 @@ public class CustomSuggesterSearchIT extends ESIntegTestCase { String randomField = randomAsciiOfLength(10); String randomSuffix = randomAsciiOfLength(10); SuggestBuilder suggestBuilder = new SuggestBuilder(); - suggestBuilder.addSuggestion( - new SuggestBuilder.SuggestionBuilder("someName", "custom") { - @Override - protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("field", randomField); - builder.field("suffix", randomSuffix); - return builder; - } - }.text(randomText) - ); + suggestBuilder.addSuggestion("someName", new CustomSuggestionBuilder(randomField, randomSuffix).text(randomText)); SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test").setTypes("test").setFrom(0).setSize(1) .suggest(suggestBuilder); @@ -76,11 +80,121 @@ public class CustomSuggesterSearchIT extends ESIntegTestCase { // TODO: infer type once JI-9019884 is fixed // TODO: see also JDK-8039214 - List<Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> suggestions - = CollectionUtils.<Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>>iterableAsArrayList(searchResponse.getSuggest().getSuggestion("someName")); + List<Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> suggestions = + CollectionUtils.<Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>>iterableAsArrayList( + searchResponse.getSuggest().getSuggestion("someName")); assertThat(suggestions, hasSize(2)); - assertThat(suggestions.get(0).getText().string(), is(String.format(Locale.ROOT, "%s-%s-%s-12", randomText, randomField, randomSuffix))); - assertThat(suggestions.get(1).getText().string(), is(String.format(Locale.ROOT, "%s-%s-%s-123", randomText, randomField, randomSuffix))); + assertThat(suggestions.get(0).getText().string(), + is(String.format(Locale.ROOT, "%s-%s-%s-12", randomText, randomField, randomSuffix))); + assertThat(suggestions.get(1).getText().string(), + is(String.format(Locale.ROOT, "%s-%s-%s-123", randomText, randomField, randomSuffix))); + } + + public static class CustomSuggestionBuilder extends SuggestionBuilder<CustomSuggestionBuilder> { + + public final static CustomSuggestionBuilder PROTOTYPE = new CustomSuggestionBuilder("_na_", "_na_"); + protected
static final ParseField RANDOM_SUFFIX_FIELD = new ParseField("suffix"); + + private String randomSuffix; + + public CustomSuggestionBuilder(String randomField, String randomSuffix) { + super(randomField); + this.randomSuffix = randomSuffix; + } + + @Override + protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RANDOM_SUFFIX_FIELD.getPreferredName(), randomSuffix); + return builder; + } + + @Override + public String getWriteableName() { + return "custom"; + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + out.writeString(randomSuffix); + } + + @Override + public CustomSuggestionBuilder doReadFrom(StreamInput in, String field) throws IOException { + return new CustomSuggestionBuilder(field, in.readString()); + } + + @Override + protected boolean doEquals(CustomSuggestionBuilder other) { + return Objects.equals(randomSuffix, other.randomSuffix); + } + + @Override + protected int doHashCode() { + return Objects.hash(randomSuffix); + } + + @Override + protected CustomSuggestionBuilder innerFromXContent(QueryParseContext parseContext) throws IOException { + XContentParser parser = parseContext.parser(); + ParseFieldMatcher parseFieldMatcher = parseContext.parseFieldMatcher(); + XContentParser.Token token; + String currentFieldName = null; + String fieldname = null; + String suffix = null; + String analyzer = null; + int sizeField = -1; + int shardSize = -1; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.ANALYZER_FIELD)) { + analyzer = parser.text(); + } else if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.FIELDNAME_FIELD)) { + fieldname = parser.text(); + } else if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.SIZE_FIELD)) { + sizeField = parser.intValue(); + } else if (parseFieldMatcher.match(currentFieldName, SuggestionBuilder.SHARDSIZE_FIELD)) { + shardSize = parser.intValue(); + } else if (parseFieldMatcher.match(currentFieldName, RANDOM_SUFFIX_FIELD)) { + suffix = parser.text(); + } + } else { + throw new ParsingException(parser.getTokenLocation(), + "suggester[custom] doesn't support field [" + currentFieldName + "]"); + } + } + + // now we should have field name, check and copy fields over to the suggestion builder we return + if (fieldname == null) { + throw new ParsingException(parser.getTokenLocation(), "the required field option is missing"); + } + CustomSuggestionBuilder builder = new CustomSuggestionBuilder(fieldname, suffix); + if (analyzer != null) { + builder.analyzer(analyzer); + } + if (sizeField != -1) { + builder.size(sizeField); + } + if (shardSize != -1) { + builder.shardSize(shardSize); + } + return builder; + } + + @Override + public SuggestionContext build(QueryShardContext context) throws IOException { + Map<String, Object> options = new HashMap<>(); + options.put(FIELDNAME_FIELD.getPreferredName(), field()); + options.put(RANDOM_SUFFIX_FIELD.getPreferredName(), randomSuffix); + CustomSuggester.CustomSuggestionsContext customSuggestionsContext = + new CustomSuggester.CustomSuggestionsContext(context, options); + customSuggestionsContext.setField(field()); + assert text != null; + customSuggestionsContext.setText(BytesRefs.toBytesRef(text)); + return customSuggestionsContext; + } + } } diff --git
a/core/src/test/java/org/elasticsearch/search/suggest/SuggestBuilderTests.java b/core/src/test/java/org/elasticsearch/search/suggest/SuggestBuilderTests.java new file mode 100644 index 00000000000..ea3ff5c7477 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/SuggestBuilderTests.java @@ -0,0 +1,166 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.suggest; + +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.search.suggest.completion.CompletionSuggesterBuilderTests; +import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; +import org.elasticsearch.search.suggest.completion.WritableTestCase; +import org.elasticsearch.search.suggest.phrase.Laplace; +import org.elasticsearch.search.suggest.phrase.LinearInterpolation; +import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder; +import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilderTests; +import org.elasticsearch.search.suggest.phrase.SmoothingModel; +import org.elasticsearch.search.suggest.phrase.StupidBackoff; +import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; +import org.elasticsearch.search.suggest.term.TermSuggestionBuilderTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map.Entry; + +public class SuggestBuilderTests extends WritableTestCase<SuggestBuilder> { + + private static NamedWriteableRegistry namedWriteableRegistry; + + /** + * Setup for the whole base test class.
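The new `SuggestBuilderTests` drives serialization round-trips through a `NamedWriteableRegistry`, so each concrete builder must be registered by prototype before `readFrom` can resolve it. A condensed sketch of the round-trip the test performs, given some `SuggestBuilder suggestBuilder`; the stream-helper names follow the common test pattern of this codebase and are assumptions here:

```java
NamedWriteableRegistry registry = new NamedWriteableRegistry();
registry.registerPrototype(SuggestionBuilder.class, TermSuggestionBuilder.PROTOTYPE);
registry.registerPrototype(SuggestionBuilder.class, PhraseSuggestionBuilder.PROTOTYPE);
registry.registerPrototype(SuggestionBuilder.class, CompletionSuggestionBuilder.PROTOTYPE);

BytesStreamOutput output = new BytesStreamOutput();
suggestBuilder.writeTo(output); // serialize the original
try (StreamInput in = new NamedWriteableAwareStreamInput(
        StreamInput.wrap(output.bytes()), registry)) {
    SuggestBuilder copy = SuggestBuilder.PROTOTYPE.readFrom(in); // resolved via the registered prototypes
    // copy should equal suggestBuilder, with the same hashCode
}
```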
+ */ + @BeforeClass + public static void init() { + NamedWriteableRegistry nwRegistry = new NamedWriteableRegistry(); + nwRegistry.registerPrototype(SuggestionBuilder.class, TermSuggestionBuilder.PROTOTYPE); + nwRegistry.registerPrototype(SuggestionBuilder.class, PhraseSuggestionBuilder.PROTOTYPE); + nwRegistry.registerPrototype(SuggestionBuilder.class, CompletionSuggestionBuilder.PROTOTYPE); + nwRegistry.registerPrototype(SmoothingModel.class, Laplace.PROTOTYPE); + nwRegistry.registerPrototype(SmoothingModel.class, LinearInterpolation.PROTOTYPE); + nwRegistry.registerPrototype(SmoothingModel.class, StupidBackoff.PROTOTYPE); + namedWriteableRegistry = nwRegistry; + } + + @AfterClass + public static void afterClass() { + namedWriteableRegistry = null; + } + + @Override + protected NamedWriteableRegistry provideNamedWritableRegistry() { + return namedWriteableRegistry; + } + + /** + * creates random suggestion builder, renders it to xContent and back to new instance that should be equal to original + */ + public void testFromXContent() throws IOException { + Suggesters suggesters = new Suggesters(Collections.emptyMap()); + QueryParseContext context = new QueryParseContext(null); + context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); + for (int runs = 0; runs < NUMBER_OF_RUNS; runs++) { + SuggestBuilder suggestBuilder = createTestModel(); + XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + if (randomBoolean()) { + xContentBuilder.prettyPrint(); + } + suggestBuilder.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); + XContentParser parser = XContentHelper.createParser(xContentBuilder.bytes()); + context.reset(parser); + + SuggestBuilder secondSuggestBuilder = SuggestBuilder.fromXContent(context, suggesters); + assertNotSame(suggestBuilder, secondSuggestBuilder); + assertEquals(suggestBuilder, secondSuggestBuilder); + assertEquals(suggestBuilder.hashCode(), secondSuggestBuilder.hashCode()); + } + } + + public void testIllegalSuggestionName() { + try { + new SuggestBuilder().addSuggestion(null, PhraseSuggestionBuilderTests.randomPhraseSuggestionBuilder()); + fail("exception expected"); + } catch (NullPointerException e) { + assertEquals("every suggestion needs a name", e.getMessage()); + } + + try { + new SuggestBuilder().addSuggestion("my-suggest", PhraseSuggestionBuilderTests.randomPhraseSuggestionBuilder()) + .addSuggestion("my-suggest", PhraseSuggestionBuilderTests.randomPhraseSuggestionBuilder()); + fail("exception expected"); + } catch (IllegalArgumentException e) { + assertEquals("already added another suggestion with name [my-suggest]", e.getMessage()); + } + } + + @Override + protected SuggestBuilder createTestModel() { + return randomSuggestBuilder(); + } + + @Override + protected SuggestBuilder createMutation(SuggestBuilder original) throws IOException { + SuggestBuilder mutation = new SuggestBuilder().setGlobalText(original.getGlobalText()); + for (Entry<String, SuggestionBuilder<?>> suggestionBuilder : original.getSuggestions().entrySet()) { + mutation.addSuggestion(suggestionBuilder.getKey(), suggestionBuilder.getValue()); + } + if (randomBoolean()) { + mutation.setGlobalText(randomAsciiOfLengthBetween(5, 60)); + } else { + mutation.addSuggestion(randomAsciiOfLength(10), PhraseSuggestionBuilderTests.randomPhraseSuggestionBuilder()); + } + return mutation; + } + + @Override + protected SuggestBuilder readFrom(StreamInput in) throws IOException { + return SuggestBuilder.PROTOTYPE.readFrom(in); + } + + public static SuggestBuilder
randomSuggestBuilder() { + SuggestBuilder builder = new SuggestBuilder(); + if (randomBoolean()) { + builder.setGlobalText(randomAsciiOfLengthBetween(1, 20)); + } + final int numSuggestions = randomIntBetween(1, 5); + for (int i = 0; i < numSuggestions; i++) { + builder.addSuggestion(randomAsciiOfLengthBetween(5, 10), randomSuggestionBuilder()); + } + return builder; + } + + private static SuggestionBuilder<?> randomSuggestionBuilder() { + switch (randomIntBetween(0, 2)) { + case 0: return TermSuggestionBuilderTests.randomTermSuggestionBuilder(); + case 1: return PhraseSuggestionBuilderTests.randomPhraseSuggestionBuilder(); + case 2: return CompletionSuggesterBuilderTests.randomCompletionSuggestionBuilder(); + default: return TermSuggestionBuilderTests.randomTermSuggestionBuilder(); + } + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java index 0d27ba04a91..e0aabe77302 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java @@ -190,11 +190,11 @@ public class CategoryContextMappingTests extends ESSingleNodeTestCase { XContentBuilder builder = jsonBuilder().value("context1"); XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.bytes()); CategoryContextMapping mapping = ContextBuilder.category("cat").build(); - List<ContextMapping.QueryContext> queryContexts = mapping.parseQueryContext(parser); - assertThat(queryContexts.size(), equalTo(1)); - assertThat(queryContexts.get(0).context, equalTo("context1")); - assertThat(queryContexts.get(0).boost, equalTo(1)); - assertThat(queryContexts.get(0).isPrefix, equalTo(false)); + List<ContextMapping.InternalQueryContext> internalQueryContexts = mapping.parseQueryContext(parser); + assertThat(internalQueryContexts.size(), equalTo(1)); + assertThat(internalQueryContexts.get(0).context, equalTo("context1")); + assertThat(internalQueryContexts.get(0).boost, equalTo(1)); + assertThat(internalQueryContexts.get(0).isPrefix, equalTo(false)); } public void testQueryContextParsingArray() throws Exception { @@ -204,14 +204,14 @@ public class CategoryContextMappingTests extends ESSingleNodeTestCase { .endArray(); XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.bytes()); CategoryContextMapping mapping = ContextBuilder.category("cat").build(); - List<ContextMapping.QueryContext> queryContexts = mapping.parseQueryContext(parser); - assertThat(queryContexts.size(), equalTo(2)); - assertThat(queryContexts.get(0).context, equalTo("context1")); - assertThat(queryContexts.get(0).boost, equalTo(1)); - assertThat(queryContexts.get(0).isPrefix, equalTo(false)); - assertThat(queryContexts.get(1).context, equalTo("context2")); - assertThat(queryContexts.get(1).boost, equalTo(1)); - assertThat(queryContexts.get(1).isPrefix, equalTo(false)); + List<ContextMapping.InternalQueryContext> internalQueryContexts = mapping.parseQueryContext(parser); + assertThat(internalQueryContexts.size(), equalTo(2)); + assertThat(internalQueryContexts.get(0).context, equalTo("context1")); + assertThat(internalQueryContexts.get(0).boost, equalTo(1)); + assertThat(internalQueryContexts.get(0).isPrefix, equalTo(false)); + assertThat(internalQueryContexts.get(1).context, equalTo("context2")); + assertThat(internalQueryContexts.get(1).boost, equalTo(1)); + assertThat(internalQueryContexts.get(1).isPrefix, equalTo(false)); } public void
testQueryContextParsingObject() throws Exception { @@ -222,11 +222,11 @@ public class CategoryContextMappingTests extends ESSingleNodeTestCase { .endObject(); XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.bytes()); CategoryContextMapping mapping = ContextBuilder.category("cat").build(); - List<ContextMapping.QueryContext> queryContexts = mapping.parseQueryContext(parser); - assertThat(queryContexts.size(), equalTo(1)); - assertThat(queryContexts.get(0).context, equalTo("context1")); - assertThat(queryContexts.get(0).boost, equalTo(10)); - assertThat(queryContexts.get(0).isPrefix, equalTo(true)); + List<ContextMapping.InternalQueryContext> internalQueryContexts = mapping.parseQueryContext(parser); + assertThat(internalQueryContexts.size(), equalTo(1)); + assertThat(internalQueryContexts.get(0).context, equalTo("context1")); + assertThat(internalQueryContexts.get(0).boost, equalTo(10)); + assertThat(internalQueryContexts.get(0).isPrefix, equalTo(true)); } @@ -245,14 +245,14 @@ public class CategoryContextMappingTests extends ESSingleNodeTestCase { .endArray(); XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.bytes()); CategoryContextMapping mapping = ContextBuilder.category("cat").build(); - List<ContextMapping.QueryContext> queryContexts = mapping.parseQueryContext(parser); - assertThat(queryContexts.size(), equalTo(2)); - assertThat(queryContexts.get(0).context, equalTo("context1")); - assertThat(queryContexts.get(0).boost, equalTo(2)); - assertThat(queryContexts.get(0).isPrefix, equalTo(true)); - assertThat(queryContexts.get(1).context, equalTo("context2")); - assertThat(queryContexts.get(1).boost, equalTo(3)); - assertThat(queryContexts.get(1).isPrefix, equalTo(false)); + List<ContextMapping.InternalQueryContext> internalQueryContexts = mapping.parseQueryContext(parser); + assertThat(internalQueryContexts.size(), equalTo(2)); + assertThat(internalQueryContexts.get(0).context, equalTo("context1")); + assertThat(internalQueryContexts.get(0).boost, equalTo(2)); + assertThat(internalQueryContexts.get(0).isPrefix, equalTo(true)); + assertThat(internalQueryContexts.get(1).context, equalTo("context2")); + assertThat(internalQueryContexts.get(1).boost, equalTo(3)); + assertThat(internalQueryContexts.get(1).isPrefix, equalTo(false)); } public void testQueryContextParsingMixed() throws Exception { @@ -266,14 +266,14 @@ public class CategoryContextMappingTests extends ESSingleNodeTestCase { .endArray(); XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.bytes()); CategoryContextMapping mapping = ContextBuilder.category("cat").build(); - List<ContextMapping.QueryContext> queryContexts = mapping.parseQueryContext(parser); - assertThat(queryContexts.size(), equalTo(2)); - assertThat(queryContexts.get(0).context, equalTo("context1")); - assertThat(queryContexts.get(0).boost, equalTo(2)); - assertThat(queryContexts.get(0).isPrefix, equalTo(true)); - assertThat(queryContexts.get(1).context, equalTo("context2")); - assertThat(queryContexts.get(1).boost, equalTo(1)); - assertThat(queryContexts.get(1).isPrefix, equalTo(false)); + List<ContextMapping.InternalQueryContext> internalQueryContexts = mapping.parseQueryContext(parser); + assertThat(internalQueryContexts.size(), equalTo(2)); + assertThat(internalQueryContexts.get(0).context, equalTo("context1")); + assertThat(internalQueryContexts.get(0).boost, equalTo(2)); + assertThat(internalQueryContexts.get(0).isPrefix, equalTo(true)); + assertThat(internalQueryContexts.get(1).context, equalTo("context2")); + assertThat(internalQueryContexts.get(1).boost, equalTo(1)); + assertThat(internalQueryContexts.get(1).isPrefix, equalTo(false));
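The renames above track the `ContextMapping.QueryContext` to `ContextMapping.InternalQueryContext` refactor; the parsing behaviour itself is unchanged, and these tests pin down the three JSON shapes a category query context may take: a bare string (boost 1, no prefix), an array of values, or an object carrying explicit `boost` and `prefix` flags. For instance, the object form parses like this (the list element type is reconstructed from the renamed fields, as the generics were stripped in extraction):

```java
// {"context": "context1", "boost": 10, "prefix": true} -> one internal context
XContentBuilder builder = jsonBuilder().startObject()
        .field("context", "context1")
        .field("boost", 10)
        .field("prefix", true)
        .endObject();
XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.bytes());
List<ContextMapping.InternalQueryContext> contexts =
        ContextBuilder.category("cat").build().parseQueryContext(parser);
// contexts.get(0): context "context1", boost 10, isPrefix true
```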
} public void testParsingContextFromDocument() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryQueryContextTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryQueryContextTests.java new file mode 100644 index 00000000000..a4cfc71a3bc --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryQueryContextTests.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.suggest.completion; + +import org.elasticsearch.search.suggest.completion.context.CategoryQueryContext; + +import java.io.IOException; + +public class CategoryQueryContextTests extends QueryContextTestCase<CategoryQueryContext> { + + public static CategoryQueryContext randomCategoryQueryContext() { + final CategoryQueryContext.Builder builder = CategoryQueryContext.builder(); + builder.setCategory(randomAsciiOfLength(10)); + maybeSet(builder::setBoost, randomIntBetween(1, 10)); + maybeSet(builder::setPrefix, randomBoolean()); + return builder.build(); + } + + @Override + protected CategoryQueryContext createTestModel() { + return randomCategoryQueryContext(); + } + + @Override + protected CategoryQueryContext prototype() { + return CategoryQueryContext.PROTOTYPE; + } + + public void testNullCategoryIsIllegal() { + final CategoryQueryContext categoryQueryContext = randomCategoryQueryContext(); + final CategoryQueryContext.Builder builder = CategoryQueryContext.builder() + .setBoost(categoryQueryContext.getBoost()) + .setPrefix(categoryQueryContext.isPrefix()); + try { + builder.build(); + fail("null category is illegal"); + } catch (NullPointerException e) { + assertEquals(e.getMessage(), "category must not be null"); + } + } + + public void testIllegalArguments() { + final CategoryQueryContext.Builder builder = CategoryQueryContext.builder(); + + try { + builder.setCategory(null); + fail("category must not be null"); + } catch (NullPointerException e) { + assertEquals(e.getMessage(), "category must not be null"); + } + + try { + builder.setBoost(-randomIntBetween(1, Integer.MAX_VALUE)); + fail("boost must be positive"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "boost must be greater than 0"); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionSuggesterBuilderTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionSuggesterBuilderTests.java new file mode 100644 index 00000000000..6551f2370a7 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionSuggesterBuilderTests.java @@ -0,0 +1,174 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.suggest.completion; + +import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.core.CompletionFieldMapper; +import org.elasticsearch.search.suggest.AbstractSuggestionBuilderTestCase; +import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; +import org.elasticsearch.search.suggest.completion.context.CategoryContextMapping; +import org.elasticsearch.search.suggest.completion.context.CategoryQueryContext; +import org.elasticsearch.search.suggest.completion.context.ContextMapping; +import org.elasticsearch.search.suggest.completion.context.ContextMappings; +import org.elasticsearch.search.suggest.completion.context.GeoContextMapping; +import org.elasticsearch.search.suggest.completion.context.GeoQueryContext; +import org.elasticsearch.search.suggest.completion.context.QueryContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.hamcrest.core.IsInstanceOf.instanceOf; +import static org.hamcrest.Matchers.containsString; + +public class CompletionSuggesterBuilderTests extends AbstractSuggestionBuilderTestCase<CompletionSuggestionBuilder> { + + @Override + protected CompletionSuggestionBuilder randomSuggestionBuilder() { + return randomCompletionSuggestionBuilder(); + } + + public static CompletionSuggestionBuilder randomCompletionSuggestionBuilder() { + return randomSuggestionBuilderWithContextInfo().builder; + } + + private static class BuilderAndInfo { + CompletionSuggestionBuilder builder; + List<String> catContexts = new ArrayList<>(); + List<String> geoContexts = new ArrayList<>(); + } + + private static BuilderAndInfo randomSuggestionBuilderWithContextInfo() { + final BuilderAndInfo builderAndInfo = new BuilderAndInfo(); + CompletionSuggestionBuilder testBuilder = new CompletionSuggestionBuilder(randomAsciiOfLengthBetween(2, 20)); + setCommonPropertiesOnRandomBuilder(testBuilder); + switch (randomIntBetween(0, 3)) { + case 0: + testBuilder.prefix(randomAsciiOfLength(10)); + break; + case 1: + testBuilder.prefix(randomAsciiOfLength(10), FuzzyOptionsTests.randomFuzzyOptions()); + break; + case 2: + testBuilder.prefix(randomAsciiOfLength(10), randomFrom(Fuzziness.ZERO, Fuzziness.ONE, Fuzziness.TWO)); + break; + case 3: + testBuilder.regex(randomAsciiOfLength(10), RegexOptionsTests.randomRegexOptions()); + break; + } + List<String> payloads = new ArrayList<>(); +
Collections.addAll(payloads, generateRandomStringArray(5, 10, false, false)); + maybeSet(testBuilder::payload, payloads); + Map<String, List<? extends QueryContext>> contextMap = new HashMap<>(); + if (randomBoolean()) { + int numContext = randomIntBetween(1, 5); + List<CategoryQueryContext> contexts = new ArrayList<>(numContext); + for (int i = 0; i < numContext; i++) { + contexts.add(CategoryQueryContextTests.randomCategoryQueryContext()); + } + String name = randomAsciiOfLength(10); + contextMap.put(name, contexts); + builderAndInfo.catContexts.add(name); + } + if (randomBoolean()) { + int numContext = randomIntBetween(1, 5); + List<GeoQueryContext> contexts = new ArrayList<>(numContext); + for (int i = 0; i < numContext; i++) { + contexts.add(GeoQueryContextTests.randomGeoQueryContext()); + } + String name = randomAsciiOfLength(10); + contextMap.put(name, contexts); + builderAndInfo.geoContexts.add(name); + } + testBuilder.contexts(contextMap); + builderAndInfo.builder = testBuilder; + return builderAndInfo; + } + + @Override + protected void mutateSpecificParameters(CompletionSuggestionBuilder builder) throws IOException { + switch (randomIntBetween(0, 5)) { + case 0: + List<String> payloads = new ArrayList<>(); + Collections.addAll(payloads, generateRandomStringArray(5, 10, false, false)); + builder.payload(payloads); + break; + case 1: + int nCatContext = randomIntBetween(1, 5); + List<CategoryQueryContext> contexts = new ArrayList<>(nCatContext); + for (int i = 0; i < nCatContext; i++) { + contexts.add(CategoryQueryContextTests.randomCategoryQueryContext()); + } + builder.contexts(Collections.singletonMap(randomAsciiOfLength(10), contexts)); + break; + case 2: + int nGeoContext = randomIntBetween(1, 5); + List<GeoQueryContext> geoContexts = new ArrayList<>(nGeoContext); + for (int i = 0; i < nGeoContext; i++) { + geoContexts.add(GeoQueryContextTests.randomGeoQueryContext()); + } + builder.contexts(Collections.singletonMap(randomAsciiOfLength(10), geoContexts)); + break; + case 3: + builder.prefix(randomAsciiOfLength(10), FuzzyOptionsTests.randomFuzzyOptions()); + break; + case 4: + builder.prefix(randomAsciiOfLength(10), randomFrom(Fuzziness.ZERO, Fuzziness.ONE, Fuzziness.TWO)); + break; + case 5: + builder.regex(randomAsciiOfLength(10), RegexOptionsTests.randomRegexOptions()); + break; + default: + throw new IllegalStateException("should not be reached"); + } + } + + /** + * Test that a malformed JSON suggestion request fails.
+ */ + public void testMalformedJsonRequestPayload() throws Exception { + final String field = RandomStrings.randomAsciiOfLength(getRandom(), 10).toLowerCase(Locale.ROOT); + final String payload = "{\n" + + " \"bad-payload\" : { \n" + + " \"prefix\" : \"sug\",\n" + + " \"completion\" : { \n" + + " \"field\" : \"" + field + "\",\n " + + " \"payload\" : [ {\"payload\":\"field\"} ]\n" + + " }\n" + + " }\n" + + "}\n"; + try { + final SuggestBuilder suggestBuilder = SuggestBuilder.fromXContent(newParseContext(payload), suggesters); + fail("Should not have been able to create SuggestBuilder from malformed JSON: " + suggestBuilder); + } catch (ParsingException e) { + assertThat(e.getMessage(), containsString("failed to parse field [payload]")); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/FuzzyOptionsTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/FuzzyOptionsTests.java new file mode 100644 index 00000000000..848a9088bc3 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/FuzzyOptionsTests.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
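`FuzzyOptions` replaces the old `new FuzzyOptionsBuilder()` construction seen in the removed lines earlier: the options are now assembled through a static builder and handed to `prefix(...)` as an immutable value. A small usage sketch consistent with the hunks above, with `FIELD` standing in for a completion-field name:

```java
FuzzyOptions fuzzyOptions = FuzzyOptions.builder()
        .setFuzziness(Fuzziness.ONE)   // edit distance between 0 and 2
        .setFuzzyMinLength(6)          // only fuzz inputs of at least this length
        .setFuzzyPrefixLength(4)       // leading characters that must match exactly
        .setTranspositions(true)
        .setUnicodeAware(false)
        .build();
CompletionSuggestionBuilder suggest = SuggestBuilders.completionSuggestion(FIELD)
        .prefix("Nrivan", fuzzyOptions)
        .size(10);
```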
+ */ + +package org.elasticsearch.search.suggest.completion; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.unit.Fuzziness; + +import java.io.IOException; + +public class FuzzyOptionsTests extends WritableTestCase<FuzzyOptions> { + + public static FuzzyOptions randomFuzzyOptions() { + final FuzzyOptions.Builder builder = FuzzyOptions.builder(); + if (randomBoolean()) { + maybeSet(builder::setFuzziness, randomFrom(Fuzziness.ZERO, Fuzziness.ONE, Fuzziness.TWO)); + } else { + maybeSet(builder::setFuzziness, randomFrom(0, 1, 2)); + } + maybeSet(builder::setFuzzyMinLength, randomIntBetween(0, 10)); + maybeSet(builder::setFuzzyPrefixLength, randomIntBetween(0, 10)); + maybeSet(builder::setMaxDeterminizedStates, randomIntBetween(1, 1000)); + maybeSet(builder::setTranspositions, randomBoolean()); + maybeSet(builder::setUnicodeAware, randomBoolean()); + return builder.build(); + } + + @Override + protected FuzzyOptions createTestModel() { + return randomFuzzyOptions(); + } + + @Override + protected FuzzyOptions createMutation(FuzzyOptions original) throws IOException { + final FuzzyOptions.Builder builder = FuzzyOptions.builder(); + builder.setFuzziness(original.getEditDistance()) + .setFuzzyPrefixLength(original.getFuzzyPrefixLength()) + .setFuzzyMinLength(original.getFuzzyMinLength()) + .setMaxDeterminizedStates(original.getMaxDeterminizedStates()) + .setTranspositions(original.isTranspositions()) + .setUnicodeAware(original.isUnicodeAware()); + switch (randomIntBetween(0, 5)) { + case 0: + builder.setFuzziness(randomValueOtherThan(original.getEditDistance(), () -> randomFrom(0, 1, 2))); + break; + case 1: + builder.setFuzzyPrefixLength(randomValueOtherThan(original.getFuzzyPrefixLength(), () -> + randomIntBetween(1, 3))); + break; + case 2: + builder.setFuzzyMinLength(randomValueOtherThan(original.getFuzzyMinLength(), () -> + randomIntBetween(1, 3))); + break; + case 3: + builder.setMaxDeterminizedStates(randomValueOtherThan(original.getMaxDeterminizedStates(), () -> + randomIntBetween(1, 10))); + break; + case 4: + builder.setTranspositions(!original.isTranspositions()); + break; + case 5: + builder.setUnicodeAware(!original.isUnicodeAware()); + break; + } + return builder.build(); + } + + @Override + protected FuzzyOptions readFrom(StreamInput in) throws IOException { + return FuzzyOptions.readFuzzyOptions(in); + } + + public void testIllegalArguments() { + final FuzzyOptions.Builder builder = FuzzyOptions.builder(); + try { + builder.setFuzziness(-randomIntBetween(1, Integer.MAX_VALUE)); + fail("fuzziness must be >= 0"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "fuzziness must be between 0 and 2"); + } + try { + builder.setFuzziness(randomIntBetween(3, Integer.MAX_VALUE)); + fail("fuzziness must be <= 2"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "fuzziness must be between 0 and 2"); + } + try { + builder.setFuzziness(null); + fail("fuzziness must not be null"); + } catch (NullPointerException e) { + assertEquals(e.getMessage(), "fuzziness must not be null"); + } + + try { + builder.setFuzzyMinLength(-randomIntBetween(1, Integer.MAX_VALUE)); + fail("fuzzyMinLength must be >= 0"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "fuzzyMinLength must not be negative"); + } + + try { + builder.setFuzzyPrefixLength(-randomIntBetween(1, Integer.MAX_VALUE)); + fail("fuzzyPrefixLength must be >= 0"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(),
"fuzzyPrefixLength must not be negative"); + } + + try { + builder.setMaxDeterminizedStates(-randomIntBetween(1, Integer.MAX_VALUE)); + fail("max determinized state must be >= 0"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "maxDeterminizedStates must not be negative"); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java index 471de9c3e93..c16b0ce645b 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java @@ -202,15 +202,15 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase { XContentBuilder builder = jsonBuilder().value("ezs42e44yx96"); XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.bytes()); GeoContextMapping mapping = ContextBuilder.geo("geo").build(); - List queryContexts = mapping.parseQueryContext(parser); - assertThat(queryContexts.size(), equalTo(1 + 8)); + List internalQueryContexts = mapping.parseQueryContext(parser); + assertThat(internalQueryContexts.size(), equalTo(1 + 8)); Collection locations = new ArrayList<>(); locations.add("ezs42e"); addNeighbors("ezs42e", GeoContextMapping.DEFAULT_PRECISION, locations); - for (ContextMapping.QueryContext queryContext : queryContexts) { - assertThat(queryContext.context, isIn(locations)); - assertThat(queryContext.boost, equalTo(1)); - assertThat(queryContext.isPrefix, equalTo(false)); + for (ContextMapping.InternalQueryContext internalQueryContext : internalQueryContexts) { + assertThat(internalQueryContext.context, isIn(locations)); + assertThat(internalQueryContext.boost, equalTo(1)); + assertThat(internalQueryContext.isPrefix, equalTo(false)); } } @@ -221,15 +221,15 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase { .endObject(); XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.bytes()); GeoContextMapping mapping = ContextBuilder.geo("geo").build(); - List queryContexts = mapping.parseQueryContext(parser); - assertThat(queryContexts.size(), equalTo(1 + 8)); + List internalQueryContexts = mapping.parseQueryContext(parser); + assertThat(internalQueryContexts.size(), equalTo(1 + 8)); Collection locations = new ArrayList<>(); locations.add("wh0n94"); addNeighbors("wh0n94", GeoContextMapping.DEFAULT_PRECISION, locations); - for (ContextMapping.QueryContext queryContext : queryContexts) { - assertThat(queryContext.context, isIn(locations)); - assertThat(queryContext.boost, equalTo(1)); - assertThat(queryContext.isPrefix, equalTo(false)); + for (ContextMapping.InternalQueryContext internalQueryContext : internalQueryContexts) { + assertThat(internalQueryContext.context, isIn(locations)); + assertThat(internalQueryContext.boost, equalTo(1)); + assertThat(internalQueryContext.isPrefix, equalTo(false)); } } @@ -244,8 +244,8 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase { .endObject(); XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.bytes()); GeoContextMapping mapping = ContextBuilder.geo("geo").build(); - List queryContexts = mapping.parseQueryContext(parser); - assertThat(queryContexts.size(), equalTo(1 + 1 + 8 + 1 + 8 + 1 + 8)); + List internalQueryContexts = mapping.parseQueryContext(parser); + assertThat(internalQueryContexts.size(), 
equalTo(1 + 1 + 8 + 1 + 8 + 1 + 8)); Collection<String> locations = new ArrayList<>(); locations.add("wh0n94"); locations.add("w"); @@ -254,10 +254,10 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase { addNeighbors("wh", 2, locations); locations.add("wh0"); addNeighbors("wh0", 3, locations); - for (ContextMapping.QueryContext queryContext : queryContexts) { - assertThat(queryContext.context, isIn(locations)); - assertThat(queryContext.boost, equalTo(10)); - assertThat(queryContext.isPrefix, equalTo(queryContext.context.length() < GeoContextMapping.DEFAULT_PRECISION)); + for (ContextMapping.InternalQueryContext internalQueryContext : internalQueryContexts) { + assertThat(internalQueryContext.context, isIn(locations)); + assertThat(internalQueryContext.boost, equalTo(10)); + assertThat(internalQueryContext.isPrefix, equalTo(internalQueryContext.context.length() < GeoContextMapping.DEFAULT_PRECISION)); } } @@ -282,8 +282,8 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase { .endArray(); XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.bytes()); GeoContextMapping mapping = ContextBuilder.geo("geo").build(); - List<ContextMapping.QueryContext> queryContexts = mapping.parseQueryContext(parser); - assertThat(queryContexts.size(), equalTo(1 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 1 + 8)); + List<ContextMapping.InternalQueryContext> internalQueryContexts = mapping.parseQueryContext(parser); + assertThat(internalQueryContexts.size(), equalTo(1 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 1 + 8)); Collection<String> firstLocations = new ArrayList<>(); firstLocations.add("wh0n94"); firstLocations.add("w"); @@ -296,15 +296,15 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase { secondLocations.add("w5cx04"); secondLocations.add("w5cx0"); addNeighbors("w5cx0", 5, secondLocations); - for (ContextMapping.QueryContext queryContext : queryContexts) { - if (firstLocations.contains(queryContext.context)) { - assertThat(queryContext.boost, equalTo(10)); - } else if (secondLocations.contains(queryContext.context)) { - assertThat(queryContext.boost, equalTo(2)); + for (ContextMapping.InternalQueryContext internalQueryContext : internalQueryContexts) { + if (firstLocations.contains(internalQueryContext.context)) { + assertThat(internalQueryContext.boost, equalTo(10)); + } else if (secondLocations.contains(internalQueryContext.context)) { + assertThat(internalQueryContext.boost, equalTo(2)); } else { - fail(queryContext.context + " was not expected"); + fail(internalQueryContext.context + " was not expected"); } - assertThat(queryContext.isPrefix, equalTo(queryContext.context.length() < GeoContextMapping.DEFAULT_PRECISION)); + assertThat(internalQueryContext.isPrefix, equalTo(internalQueryContext.context.length() < GeoContextMapping.DEFAULT_PRECISION)); } } @@ -325,8 +325,8 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase { .endArray(); XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.bytes()); GeoContextMapping mapping = ContextBuilder.geo("geo").build(); - List<ContextMapping.QueryContext> queryContexts = mapping.parseQueryContext(parser); - assertThat(queryContexts.size(), equalTo(1 + 1 + 8 + 1 + 8 + 1 + 8)); + List<ContextMapping.InternalQueryContext> internalQueryContexts = mapping.parseQueryContext(parser); + assertThat(internalQueryContexts.size(), equalTo(1 + 1 + 8 + 1 + 8 + 1 + 8)); Collection<String> firstLocations = new ArrayList<>(); firstLocations.add("wh0n94"); firstLocations.add("w"); @@ -336,15 +336,15 @@ public class GeoContextMappingTests extends ESSingleNodeTestCase { Collection<String> secondLocations = new ArrayList<>();
secondLocations.add("w5cx04"); addNeighbors("w5cx04", 6, secondLocations); - for (ContextMapping.QueryContext queryContext : queryContexts) { - if (firstLocations.contains(queryContext.context)) { - assertThat(queryContext.boost, equalTo(10)); - } else if (secondLocations.contains(queryContext.context)) { - assertThat(queryContext.boost, equalTo(1)); + for (ContextMapping.InternalQueryContext internalQueryContext : internalQueryContexts) { + if (firstLocations.contains(internalQueryContext.context)) { + assertThat(internalQueryContext.boost, equalTo(10)); + } else if (secondLocations.contains(internalQueryContext.context)) { + assertThat(internalQueryContext.boost, equalTo(1)); } else { - fail(queryContext.context + " was not expected"); + fail(internalQueryContext.context + " was not expected"); } - assertThat(queryContext.isPrefix, equalTo(queryContext.context.length() < GeoContextMapping.DEFAULT_PRECISION)); + assertThat(internalQueryContext.isPrefix, equalTo(internalQueryContext.context.length() < GeoContextMapping.DEFAULT_PRECISION)); } } } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java new file mode 100644 index 00000000000..1f724967820 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.suggest.completion; + +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.search.suggest.completion.context.GeoQueryContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class GeoQueryContextTests extends QueryContextTestCase<GeoQueryContext> { + + public static GeoQueryContext randomGeoQueryContext() { + final GeoQueryContext.Builder builder = GeoQueryContext.builder(); + builder.setGeoPoint(new GeoPoint(randomDouble(), randomDouble())); + maybeSet(builder::setBoost, randomIntBetween(1, 10)); + maybeSet(builder::setPrecision, randomIntBetween(1, 12)); + List<Integer> neighbours = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(1, 12); i++) { + neighbours.add(randomIntBetween(1, 12)); + } + maybeSet(builder::setNeighbours, neighbours); + return builder.build(); + } + + @Override + protected GeoQueryContext createTestModel() { + return randomGeoQueryContext(); + } + + @Override + protected GeoQueryContext prototype() { + return GeoQueryContext.PROTOTYPE; + } + + public void testNullGeoPointIsIllegal() { + final GeoQueryContext geoQueryContext = randomGeoQueryContext(); + final GeoQueryContext.Builder builder = GeoQueryContext.builder() + .setNeighbours(geoQueryContext.getNeighbours()) + .setPrecision(geoQueryContext.getPrecision()) + .setBoost(geoQueryContext.getBoost()); + try { + builder.build(); + fail("null geo point is illegal"); + } catch (NullPointerException e) { + assertThat(e.getMessage(), equalTo("geoPoint must not be null")); + } + } + + public void testIllegalArguments() { + final GeoQueryContext.Builder builder = GeoQueryContext.builder(); + + try { + builder.setGeoPoint(null); + fail("geoPoint must not be null"); + } catch (NullPointerException e) { + assertEquals(e.getMessage(), "geoPoint must not be null"); + } + try { + builder.setBoost(-randomIntBetween(1, Integer.MAX_VALUE)); + fail("boost must be positive"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "boost must be greater than 0"); + } + int precision = 0; + try { + do { + precision = randomInt(); + } while (precision >= 1 && precision <= 12); + builder.setPrecision(precision); + fail("precision must be between 1 and 12"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "precision must be between 1 and 12"); + } + try { + List<Integer> neighbours = new ArrayList<>(); + neighbours.add(precision); + for (int i = 1; i < randomIntBetween(1, 11); i++) { + neighbours.add(i); + } + Collections.shuffle(neighbours, random()); + builder.setNeighbours(neighbours); + fail("neighbour value must be between 1 and 12"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "neighbour value must be between 1 and 12"); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/QueryContextTestCase.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/QueryContextTestCase.java new file mode 100644 index 00000000000..78b73e68890 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/QueryContextTestCase.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.suggest.completion; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.suggest.completion.context.QueryContext; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static junit.framework.TestCase.assertEquals; + + +public abstract class QueryContextTestCase<QC extends QueryContext> extends ESTestCase { + + private static final int NUMBER_OF_RUNS = 20; + + /** + * create random model that is put under test + */ + protected abstract QC createTestModel(); + + /** + * query context prototype to read serialized format + */ + protected abstract QC prototype(); + + public void testToXContext() throws IOException { + for (int i = 0; i < NUMBER_OF_RUNS; i++) { + QueryContext toXContent = createTestModel(); + XContentBuilder builder = XContentFactory.jsonBuilder(); + toXContent.toXContent(builder, ToXContent.EMPTY_PARAMS); + BytesReference bytesReference = builder.bytes(); + XContentParser parser = XContentFactory.xContent(bytesReference).createParser(bytesReference); + parser.nextToken(); + QueryContext fromXContext = prototype().fromXContext(parser); + assertEquals(toXContent, fromXContext); + assertEquals(toXContent.hashCode(), fromXContext.hashCode()); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/RegexOptionsTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/RegexOptionsTests.java new file mode 100644 index 00000000000..082e2bc2687 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/RegexOptionsTests.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.search.suggest.completion; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.index.query.RegexpFlag; + +import java.io.IOException; + +public class RegexOptionsTests extends WritableTestCase<RegexOptions> { + + public static RegexOptions randomRegexOptions() { + final RegexOptions.Builder builder = RegexOptions.builder(); + maybeSet(builder::setMaxDeterminizedStates, randomIntBetween(1, 1000)); + StringBuilder sb = new StringBuilder(); + for (RegexpFlag regexpFlag : RegexpFlag.values()) { + if (randomBoolean()) { + if (sb.length() != 0) { + sb.append("|"); + } + sb.append(regexpFlag.name()); + } + } + maybeSet(builder::setFlags, sb.toString()); + return builder.build(); + } + + @Override + protected RegexOptions createTestModel() { + return randomRegexOptions(); + } + + @Override + protected RegexOptions createMutation(RegexOptions original) throws IOException { + final RegexOptions.Builder builder = RegexOptions.builder(); + builder.setMaxDeterminizedStates(randomValueOtherThan(original.getMaxDeterminizedStates(), () -> randomIntBetween(1, 10))); + return builder.build(); + } + + @Override + protected RegexOptions readFrom(StreamInput in) throws IOException { + return RegexOptions.readRegexOptions(in); + } + + public void testIllegalArgument() { + final RegexOptions.Builder builder = RegexOptions.builder(); + try { + builder.setMaxDeterminizedStates(-randomIntBetween(1, Integer.MAX_VALUE)); + fail("maxDeterminizedStates must not be negative"); + } catch (IllegalArgumentException e) { + assertEquals(e.getMessage(), "maxDeterminizedStates must not be negative"); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/WritableTestCase.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/WritableTestCase.java new file mode 100644 index 00000000000..68cc30f8de4 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/WritableTestCase.java @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.search.suggest.completion; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +/** + * Base class for testing serialization and equality for + * {@link Writeable} models + */ +public abstract class WritableTestCase<M extends Writeable<M>> extends ESTestCase { + + protected static final int NUMBER_OF_RUNS = 20; + + /** + * create random model that is put under test + */ + protected abstract M createTestModel(); + + /** + * mutate the given model so the returned model is different + */ + protected abstract M createMutation(M original) throws IOException; + + /** + * model prototype to read serialized format + */ + protected abstract M readFrom(StreamInput in) throws IOException; + + /** + * Test serialization and deserialization of the tested model. + */ + public void testSerialization() throws IOException { + for (int i = 0; i < NUMBER_OF_RUNS; i++) { + M testModel = createTestModel(); + M deserializedModel = copyModel(testModel); + assertEquals(testModel, deserializedModel); + assertEquals(testModel.hashCode(), deserializedModel.hashCode()); + assertNotSame(testModel, deserializedModel); + } + } + + /** + * Test equality and hashCode properties + */ + @SuppressWarnings("unchecked") + public void testEqualsAndHashcode() throws IOException { + M firstModel = createTestModel(); + String modelName = firstModel.getClass().getSimpleName(); + assertFalse(modelName + " is equal to null", firstModel.equals(null)); + assertFalse(modelName + " is equal to incompatible type", firstModel.equals("")); + assertTrue(modelName + " is not equal to self", firstModel.equals(firstModel)); + assertThat("same " + modelName + "'s hashcode returns different values if called multiple times", firstModel.hashCode(), + equalTo(firstModel.hashCode())); + assertThat("different " + modelName + " should not be equal", createMutation(firstModel), not(equalTo(firstModel))); + + M secondModel = copyModel(firstModel); + assertTrue(modelName + " is not equal to self", secondModel.equals(secondModel)); + assertTrue(modelName + " is not equal to its copy", firstModel.equals(secondModel)); + assertTrue("equals is not symmetric", secondModel.equals(firstModel)); + assertThat(modelName + " copy's hashcode is different from original hashcode", secondModel.hashCode(), + equalTo(firstModel.hashCode())); + + M thirdModel = copyModel(secondModel); + assertTrue(modelName + " is not equal to self", thirdModel.equals(thirdModel)); + assertTrue(modelName + " is not equal to its copy", secondModel.equals(thirdModel)); + assertThat(modelName + " copy's hashcode is different from original hashcode", secondModel.hashCode(), + equalTo(thirdModel.hashCode())); + assertTrue("equals is not transitive", firstModel.equals(thirdModel)); + assertThat(modelName + " copy's hashcode is different from original hashcode", firstModel.hashCode(), + equalTo(thirdModel.hashCode())); + assertTrue(modelName + " equals is not symmetric", thirdModel.equals(secondModel)); + assertTrue(modelName + " equals is not symmetric", thirdModel.equals(firstModel)); + } + + private M copyModel(M original) throws IOException { + try
(BytesStreamOutput output = new BytesStreamOutput()) { + original.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), provideNamedWritableRegistry())) { + return readFrom(in); + } + } + } + + protected NamedWriteableRegistry provideNamedWritableRegistry() { + return new NamedWriteableRegistry(); + } +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java index ba3c3492476..4a47be481e0 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.suggest.phrase; -import org.apache.lucene.analysis.core.WhitespaceAnalyzer; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -31,28 +30,14 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.analysis.AnalysisService; -import org.elasticsearch.index.analysis.NamedAnalyzer; -import org.elasticsearch.index.mapper.ContentPath; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.core.StringFieldMapper; -import org.elasticsearch.index.mapper.core.StringFieldMapper.StringFieldType; -import org.elasticsearch.index.mapper.core.TextFieldMapper; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.search.suggest.phrase.PhraseSuggestionContext.DirectCandidateGenerator; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.IndexSettingsModule; import java.io.IOException; import java.util.Arrays; import java.util.Collections; -import java.util.function.Consumer; import static org.hamcrest.Matchers.equalTo; @@ -148,73 +133,22 @@ public class DirectCandidateGeneratorTests extends ESTestCase{ } } - /** - * test that build() outputs a {@link DirectCandidateGenerator} that is similar to the one - * we would get when parsing the xContent the test generator is rendering out - */ - public void testBuild() throws IOException { - - long start = System.currentTimeMillis(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(randomAsciiOfLengthBetween(1, 10), Settings.EMPTY); - - AnalysisService mockAnalysisService = new AnalysisService(idxSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()) { - @Override - public NamedAnalyzer analyzer(String name) { - return new NamedAnalyzer(name, new WhitespaceAnalyzer()); - } - }; - - MapperService mockMapperService = new MapperService(idxSettings, mockAnalysisService , null, new IndicesModule().getMapperRegistry(), null) { - @Override - public MappedFieldType fullName(String fullName) { - return new StringFieldType(); - } - }; - - QueryShardContext 
mockShardContext = new QueryShardContext(idxSettings, null, null, mockMapperService, null, null, null) { - @Override - public MappedFieldType fieldMapper(String name) { - TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name); - return builder.build(new Mapper.BuilderContext(idxSettings.getSettings(), new ContentPath(1))).fieldType(); - } - }; - mockShardContext.setMapUnmappedFieldAsString(true); - - for (int runs = 0; runs < NUMBER_OF_RUNS; runs++) { - DirectCandidateGeneratorBuilder generator = randomCandidateGenerator(); - // first, build via DirectCandidateGenerator#build() - DirectCandidateGenerator contextGenerator = generator.build(mockShardContext); - - // second, render random test generator to xContent and parse using - // PhraseSuggestParser - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); - if (randomBoolean()) { - builder.prettyPrint(); - } - generator.toXContent(builder, ToXContent.EMPTY_PARAMS); - XContentParser parser = XContentHelper.createParser(builder.bytes()); - - DirectCandidateGenerator secondGenerator = PhraseSuggestParser.parseCandidateGenerator(parser, - mockShardContext.getMapperService(), mockShardContext.parseFieldMatcher()); - - // compare their properties - assertNotSame(contextGenerator, secondGenerator); - assertEquals(contextGenerator.field(), secondGenerator.field()); - assertEquals(contextGenerator.accuracy(), secondGenerator.accuracy(), Float.MIN_VALUE); - assertEquals(contextGenerator.maxTermFreq(), secondGenerator.maxTermFreq(), Float.MIN_VALUE); - assertEquals(contextGenerator.maxEdits(), secondGenerator.maxEdits()); - assertEquals(contextGenerator.maxInspections(), secondGenerator.maxInspections()); - assertEquals(contextGenerator.minDocFreq(), secondGenerator.minDocFreq(), Float.MIN_VALUE); - assertEquals(contextGenerator.minWordLength(), secondGenerator.minWordLength()); - assertEquals(contextGenerator.postFilter(), secondGenerator.postFilter()); - assertEquals(contextGenerator.prefixLength(), secondGenerator.prefixLength()); - assertEquals(contextGenerator.preFilter(), secondGenerator.preFilter()); - assertEquals(contextGenerator.sort(), secondGenerator.sort()); - assertEquals(contextGenerator.size(), secondGenerator.size()); - // some instances of StringDistance don't support equals, just checking the class here - assertEquals(contextGenerator.stringDistance().getClass(), secondGenerator.stringDistance().getClass()); - assertEquals(contextGenerator.suggestMode(), secondGenerator.suggestMode()); - } + public static void assertEqualGenerators(DirectCandidateGenerator first, DirectCandidateGenerator second) { + assertEquals(first.field(), second.field()); + assertEquals(first.accuracy(), second.accuracy(), Float.MIN_VALUE); + assertEquals(first.maxTermFreq(), second.maxTermFreq(), Float.MIN_VALUE); + assertEquals(first.maxEdits(), second.maxEdits()); + assertEquals(first.maxInspections(), second.maxInspections()); + assertEquals(first.minDocFreq(), second.minDocFreq(), Float.MIN_VALUE); + assertEquals(first.minWordLength(), second.minWordLength()); + assertEquals(first.postFilter(), second.postFilter()); + assertEquals(first.prefixLength(), second.prefixLength()); + assertEquals(first.preFilter(), second.preFilter()); + assertEquals(first.sort(), second.sort()); + assertEquals(first.size(), second.size()); + // some instances of StringDistance don't support equals, just checking the class here + assertEquals(first.stringDistance().getClass(), second.stringDistance().getClass()); + 
assertEquals(first.suggestMode(), second.suggestMode()); } /** @@ -306,12 +240,6 @@ public class DirectCandidateGeneratorTests extends ESTestCase{ return generator; } - private static <T> void maybeSet(Consumer<T> consumer, T value) { - if (randomBoolean()) { - consumer.accept(value); - } - } - private static DirectCandidateGeneratorBuilder serializedCopy(DirectCandidateGeneratorBuilder original) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { original.writeTo(output); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/LaplaceModelTests.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/LaplaceModelTests.java index 87ad654e0cd..96ac0c9cb27 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/phrase/LaplaceModelTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/LaplaceModelTests.java @@ -19,15 +19,17 @@ package org.elasticsearch.search.suggest.phrase; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.Laplace; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.SmoothingModel; - import static org.hamcrest.Matchers.instanceOf; public class LaplaceModelTests extends SmoothingModelTestCase { @Override protected SmoothingModel createTestModel() { + return createRandomModel(); + } + + + static SmoothingModel createRandomModel() { return new Laplace(randomDoubleBetween(0.0, 10.0, false)); } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/LinearInterpolationModelTests.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/LinearInterpolationModelTests.java index 1112b7a5ed7..ed663ef5241 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/phrase/LinearInterpolationModelTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/LinearInterpolationModelTests.java @@ -19,15 +19,16 @@ package org.elasticsearch.search.suggest.phrase; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.LinearInterpolation; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.SmoothingModel; - import static org.hamcrest.Matchers.instanceOf; public class LinearInterpolationModelTests extends SmoothingModelTestCase { @Override protected SmoothingModel createTestModel() { + return createRandomModel(); + } + + static LinearInterpolation createRandomModel() { double trigramLambda = randomDoubleBetween(0.0, 10.0, false); double bigramLambda = randomDoubleBetween(0.0, 10.0, false); double unigramLambda = randomDoubleBetween(0.0, 10.0, false); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java index 812928dee28..2143c7be9e0 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java @@ -96,7 +96,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase { writer.addDocument(doc); } - DirectoryReader ir = DirectoryReader.open(writer, false); + DirectoryReader ir = DirectoryReader.open(writer); WordScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f); NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(); @@ -238,7 +238,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
writer.addDocument(doc); } - DirectoryReader ir = DirectoryReader.open(writer, false); + DirectoryReader ir = DirectoryReader.open(writer); LaplaceScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f); NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(); DirectSpellChecker spellchecker = new DirectSpellChecker(); @@ -321,7 +321,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase { writer.addDocument(doc); } - DirectoryReader ir = DirectoryReader.open(writer, false); + DirectoryReader ir = DirectoryReader.open(writer); WordScorer wordScorer = new LinearInterpoatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5, 0.4, 0.1); NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilderTests.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilderTests.java new file mode 100644 index 00000000000..36131c80483 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilderTests.java @@ -0,0 +1,242 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.suggest.phrase; + +import org.elasticsearch.script.Template; +import org.elasticsearch.search.suggest.AbstractSuggestionBuilderTestCase; +import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; +import org.elasticsearch.search.suggest.phrase.PhraseSuggestionContext.DirectCandidateGenerator; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; + +import static org.hamcrest.Matchers.instanceOf; + +public class PhraseSuggestionBuilderTests extends AbstractSuggestionBuilderTestCase<PhraseSuggestionBuilder> { + + @BeforeClass + public static void initSmoothingModels() { + namedWriteableRegistry.registerPrototype(SmoothingModel.class, Laplace.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SmoothingModel.class, LinearInterpolation.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SmoothingModel.class, StupidBackoff.PROTOTYPE); + } + + @Override + protected PhraseSuggestionBuilder randomSuggestionBuilder() { + return randomPhraseSuggestionBuilder(); + } + + public static PhraseSuggestionBuilder randomPhraseSuggestionBuilder() { + PhraseSuggestionBuilder testBuilder = new PhraseSuggestionBuilder(randomAsciiOfLengthBetween(2, 20)); + setCommonPropertiesOnRandomBuilder(testBuilder); + maybeSet(testBuilder::maxErrors, randomFloat()); + maybeSet(testBuilder::separator, randomAsciiOfLengthBetween(1, 10)); + maybeSet(testBuilder::realWordErrorLikelihood, randomFloat()); + maybeSet(testBuilder::confidence, randomFloat()); + maybeSet(testBuilder::collateQuery, randomAsciiOfLengthBetween(3, 20)); + // collate query prune and parameters will only be used when query is set + if (testBuilder.collateQuery() != null) { + maybeSet(testBuilder::collatePrune, randomBoolean()); + if (randomBoolean()) { + Map<String, Object> collateParams = new HashMap<>(); + int numParams = randomIntBetween(1, 5); + for (int i = 0; i < numParams; i++) { + collateParams.put(randomAsciiOfLength(5), randomAsciiOfLength(5)); + } + testBuilder.collateParams(collateParams); + } + } + if (randomBoolean()) { + // preTag, postTag + testBuilder.highlight(randomAsciiOfLengthBetween(3, 20), randomAsciiOfLengthBetween(3, 20)); + } + maybeSet(testBuilder::gramSize, randomIntBetween(1, 5)); + maybeSet(testBuilder::forceUnigrams, randomBoolean()); + maybeSet(testBuilder::tokenLimit, randomIntBetween(1, 20)); + if (randomBoolean()) { + testBuilder.smoothingModel(randomSmoothingModel()); + } + if (randomBoolean()) { + int numGenerators = randomIntBetween(1, 5); + for (int i = 0; i < numGenerators; i++) { + testBuilder.addCandidateGenerator(DirectCandidateGeneratorTests.randomCandidateGenerator()); + } + } + return testBuilder; + } + + private static SmoothingModel randomSmoothingModel() { + SmoothingModel model = null; + switch (randomIntBetween(0, 2)) { + case 0: + model = LaplaceModelTests.createRandomModel(); + break; + case 1: + model = StupidBackoffModelTests.createRandomModel(); + break; + case 2: + model = LinearInterpolationModelTests.createRandomModel(); + break; + } + return model; + } + + @Override + protected void mutateSpecificParameters(PhraseSuggestionBuilder builder) throws IOException { + switch (randomIntBetween(0, 12)) { + case 0: + builder.maxErrors(randomValueOtherThan(builder.maxErrors(), () -> randomFloat())); + break; + case 1: + builder.realWordErrorLikelihood(randomValueOtherThan(builder.realWordErrorLikelihood(), () -> randomFloat())); + break; + case 2:
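+ // mutate the confidence parameter to a different random float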
builder.confidence(randomValueOtherThan(builder.confidence(), () -> randomFloat())); + break; + case 3: + builder.gramSize(randomValueOtherThan(builder.gramSize(), () -> randomIntBetween(1, 5))); + break; + case 4: + builder.tokenLimit(randomValueOtherThan(builder.tokenLimit(), () -> randomIntBetween(1, 20))); + break; + case 5: + builder.separator(randomValueOtherThan(builder.separator(), () -> randomAsciiOfLengthBetween(1, 10))); + break; + case 6: + Template collateQuery = builder.collateQuery(); + if (collateQuery != null) { + builder.collateQuery(randomValueOtherThan(collateQuery.getScript(), () -> randomAsciiOfLengthBetween(3, 20))); + } else { + builder.collateQuery(randomAsciiOfLengthBetween(3, 20)); + } + break; + case 7: + builder.collatePrune(builder.collatePrune() == null ? randomBoolean() : !builder.collatePrune()); + break; + case 8: + // preTag, postTag + String currentPre = builder.preTag(); + if (currentPre != null) { + // simply double both values + builder.highlight(builder.preTag() + builder.preTag(), builder.postTag() + builder.postTag()); + } else { + builder.highlight(randomAsciiOfLengthBetween(3, 20), randomAsciiOfLengthBetween(3, 20)); + } + break; + case 9: + builder.forceUnigrams(builder.forceUnigrams() == null ? randomBoolean() : !builder.forceUnigrams()); + break; + case 10: + Map<String, Object> collateParams = builder.collateParams() == null ? new HashMap<>(1) : builder.collateParams(); + collateParams.put(randomAsciiOfLength(5), randomAsciiOfLength(5)); + builder.collateParams(collateParams); + break; + case 11: + builder.smoothingModel(randomValueOtherThan(builder.smoothingModel(), PhraseSuggestionBuilderTests::randomSmoothingModel)); + break; + case 12: + builder.addCandidateGenerator(DirectCandidateGeneratorTests.randomCandidateGenerator()); + break; + } + } + + public void testInvalidParameters() throws IOException { + // test missing field name + try { + new PhraseSuggestionBuilder(null); + fail("Should not allow null as field name"); + } catch (NullPointerException e) { + assertEquals("suggestion requires a field name", e.getMessage()); + } + + // test empty field name + try { + new PhraseSuggestionBuilder(""); + fail("Should not allow empty string as field name"); + } catch (IllegalArgumentException e) { + assertEquals("suggestion field name is empty", e.getMessage()); + } + + PhraseSuggestionBuilder builder = new PhraseSuggestionBuilder(randomAsciiOfLengthBetween(2, 20)); + try { + builder.gramSize(0); + fail("Should not allow gramSize < 1"); + } catch (IllegalArgumentException e) { + assertEquals("gramSize must be >= 1", e.getMessage()); + } + + try { + builder.gramSize(-1); + fail("Should not allow gramSize < 1"); + } catch (IllegalArgumentException e) { + assertEquals("gramSize must be >= 1", e.getMessage()); + } + + try { + builder.maxErrors(-1); + fail("Should not allow maxErrors < 0"); + } catch (IllegalArgumentException e) { + assertEquals("max_error must be > 0.0", e.getMessage()); + } + + try { + builder.separator(null); + fail("Should not allow null as separator"); + } catch (NullPointerException e) { + assertEquals("separator cannot be set to null", e.getMessage()); + } + + try { + builder.realWordErrorLikelihood(-1); + fail("Should not allow real world error likelihood < 0"); + } catch (IllegalArgumentException e) { + assertEquals("real_word_error_likelihood must be > 0.0", e.getMessage()); + } + + try { + builder.confidence(-1); + fail("Should not allow confidence < 0"); + } catch (IllegalArgumentException e) { + assertEquals("confidence must be >=
0.0", e.getMessage()); + } + + try { + builder.tokenLimit(0); + fail("token_limit must be >= 1"); + } catch (IllegalArgumentException e) { + assertEquals("token_limit must be >= 1", e.getMessage()); + } + + try { + if (randomBoolean()) { + builder.highlight(null, ""); + } else { + builder.highlight("", null); + } + fail("Pre and post tag must both be null or both not be null."); + } catch (IllegalArgumentException e) { + assertEquals("Pre and post tag must both be null or both not be null.", e.getMessage()); + } + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java index e4a8ae72b91..00009d1e76f 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java @@ -45,10 +45,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.indices.query.IndicesQueriesRegistry; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.Laplace; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.LinearInterpolation; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.SmoothingModel; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.StupidBackoff; import org.elasticsearch.test.ESTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -97,7 +93,8 @@ public abstract class SmoothingModelTestCase extends ESTestCase { * Test that creates new smoothing model from a random test smoothing model and checks both for equality */ public void testFromXContent() throws IOException { - QueryParseContext context = new QueryParseContext(new IndicesQueriesRegistry(Settings.settingsBuilder().build(), Collections.emptyMap())); + QueryParseContext context = new QueryParseContext( + new IndicesQueriesRegistry(Settings.settingsBuilder().build(), Collections.emptyMap())); context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); SmoothingModel testModel = createTestModel(); @@ -113,7 +110,7 @@ public abstract class SmoothingModelTestCase extends ESTestCase { parser.nextToken(); // go to start token, real parsing would do that in the outer element parser SmoothingModel prototype = (SmoothingModel) namedWriteableRegistry.getPrototype(SmoothingModel.class, testModel.getWriteableName()); - SmoothingModel parsedModel = prototype.fromXContent(context); + SmoothingModel parsedModel = prototype.innerFromXContent(context); assertNotSame(testModel, parsedModel); assertEquals(testModel, parsedModel); assertEquals(testModel.hashCode(), parsedModel.hashCode()); @@ -132,9 +129,10 @@ public abstract class SmoothingModelTestCase extends ESTestCase { Document doc = new Document(); doc.add(new Field("field", "someText", TextField.TYPE_NOT_STORED)); writer.addDocument(doc); - DirectoryReader ir = DirectoryReader.open(writer, false); + DirectoryReader ir = DirectoryReader.open(writer); - WordScorer wordScorer = testModel.buildWordScorerFactory().newScorer(ir, MultiFields.getTerms(ir , "field"), "field", 0.9d, BytesRefs.toBytesRef(" ")); + WordScorer wordScorer = testModel.buildWordScorerFactory().newScorer(ir, MultiFields.getTerms(ir, "field"), "field", 0.9d, + BytesRefs.toBytesRef(" ")); assertWordScorer(wordScorer, testModel); } @@ -159,35 
+157,39 @@ public abstract class SmoothingModelTestCase extends ESTestCase { */ @SuppressWarnings("unchecked") public void testEqualsAndHashcode() throws IOException { - SmoothingModel firstModel = createTestModel(); - assertFalse("smoothing model is equal to null", firstModel.equals(null)); - assertFalse("smoothing model is equal to incompatible type", firstModel.equals("")); - assertTrue("smoothing model is not equal to self", firstModel.equals(firstModel)); - assertThat("same smoothing model's hashcode returns different values if called multiple times", firstModel.hashCode(), - equalTo(firstModel.hashCode())); - assertThat("different smoothing models should not be equal", createMutation(firstModel), not(equalTo(firstModel))); + SmoothingModel firstModel = createTestModel(); + assertFalse("smoothing model is equal to null", firstModel.equals(null)); + assertFalse("smoothing model is equal to incompatible type", firstModel.equals("")); + assertTrue("smoothing model is not equal to self", firstModel.equals(firstModel)); + assertThat("same smoothing model's hashcode returns different values if called multiple times", firstModel.hashCode(), + equalTo(firstModel.hashCode())); + assertThat("different smoothing models should not be equal", createMutation(firstModel), not(equalTo(firstModel))); - SmoothingModel secondModel = copyModel(firstModel); - assertTrue("smoothing model is not equal to self", secondModel.equals(secondModel)); - assertTrue("smoothing model is not equal to its copy", firstModel.equals(secondModel)); - assertTrue("equals is not symmetric", secondModel.equals(firstModel)); - assertThat("smoothing model copy's hashcode is different from original hashcode", secondModel.hashCode(), equalTo(firstModel.hashCode())); + SmoothingModel secondModel = copyModel(firstModel); + assertTrue("smoothing model is not equal to self", secondModel.equals(secondModel)); + assertTrue("smoothing model is not equal to its copy", firstModel.equals(secondModel)); + assertTrue("equals is not symmetric", secondModel.equals(firstModel)); + assertThat("smoothing model copy's hashcode is different from original hashcode", secondModel.hashCode(), + equalTo(firstModel.hashCode())); - SmoothingModel thirdModel = copyModel(secondModel); - assertTrue("smoothing model is not equal to self", thirdModel.equals(thirdModel)); - assertTrue("smoothing model is not equal to its copy", secondModel.equals(thirdModel)); - assertThat("smoothing model copy's hashcode is different from original hashcode", secondModel.hashCode(), equalTo(thirdModel.hashCode())); - assertTrue("equals is not transitive", firstModel.equals(thirdModel)); - assertThat("smoothing model copy's hashcode is different from original hashcode", firstModel.hashCode(), equalTo(thirdModel.hashCode())); - assertTrue("equals is not symmetric", thirdModel.equals(secondModel)); - assertTrue("equals is not symmetric", thirdModel.equals(firstModel)); + SmoothingModel thirdModel = copyModel(secondModel); + assertTrue("smoothing model is not equal to self", thirdModel.equals(thirdModel)); + assertTrue("smoothing model is not equal to its copy", secondModel.equals(thirdModel)); + assertThat("smoothing model copy's hashcode is different from original hashcode", secondModel.hashCode(), + equalTo(thirdModel.hashCode())); + assertTrue("equals is not transitive", firstModel.equals(thirdModel)); + assertThat("smoothing model copy's hashcode is different from original hashcode", firstModel.hashCode(), + equalTo(thirdModel.hashCode())); + assertTrue("equals is not 
symmetric", thirdModel.equals(secondModel)); + assertTrue("equals is not symmetric", thirdModel.equals(firstModel)); } static SmoothingModel copyModel(SmoothingModel original) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { original.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) { - SmoothingModel prototype = (SmoothingModel) namedWriteableRegistry.getPrototype(SmoothingModel.class, original.getWriteableName()); + SmoothingModel prototype = (SmoothingModel) namedWriteableRegistry.getPrototype(SmoothingModel.class, + original.getWriteableName()); return prototype.readFrom(in); } } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/StupidBackoffModelTests.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/StupidBackoffModelTests.java index c3bd66d2a81..1b6e1cf2c88 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/phrase/StupidBackoffModelTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/StupidBackoffModelTests.java @@ -19,15 +19,16 @@ package org.elasticsearch.search.suggest.phrase; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.SmoothingModel; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.StupidBackoff; - import static org.hamcrest.Matchers.instanceOf; public class StupidBackoffModelTests extends SmoothingModelTestCase { @Override protected SmoothingModel createTestModel() { + return createRandomModel(); + } + + static SmoothingModel createRandomModel() { return new StupidBackoff(randomDoubleBetween(0.0, 10.0, false)); } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/term/SortByTests.java b/core/src/test/java/org/elasticsearch/search/suggest/term/SortByTests.java new file mode 100644 index 00000000000..870a96dfe73 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/term/SortByTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.suggest.term; + +import org.elasticsearch.common.io.stream.AbstractWriteableEnumTestCase; +import org.elasticsearch.search.suggest.SortBy; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Test the {@link SortBy} enum. 
+ */ +public class SortByTests extends AbstractWriteableEnumTestCase { + + @Override + public void testValidOrdinals() { + assertThat(SortBy.SCORE.ordinal(), equalTo(0)); + assertThat(SortBy.FREQUENCY.ordinal(), equalTo(1)); + } + + @Override + public void testFromString() { + assertThat(SortBy.resolve("score"), equalTo(SortBy.SCORE)); + assertThat(SortBy.resolve("frequency"), equalTo(SortBy.FREQUENCY)); + final String doesntExist = "doesnt_exist"; + try { + SortBy.resolve(doesntExist); + fail("SortBy should not have an element " + doesntExist); + } catch (IllegalArgumentException e) { + } + try { + SortBy.resolve(null); + fail("SortBy.resolve on a null value should throw an exception."); + } catch (NullPointerException e) { + assertThat(e.getMessage(), equalTo("Input string is null")); + } + } + + @Override + public void testWriteTo() throws IOException { + assertWriteToStream(SortBy.SCORE, 0); + assertWriteToStream(SortBy.FREQUENCY, 1); + } + + @Override + public void testReadFrom() throws IOException { + assertReadFromStream(0, SortBy.SCORE); + assertReadFromStream(1, SortBy.FREQUENCY); + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/term/StringDistanceImplTests.java b/core/src/test/java/org/elasticsearch/search/suggest/term/StringDistanceImplTests.java new file mode 100644 index 00000000000..4917b2860fb --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/term/StringDistanceImplTests.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.suggest.term; + +import org.elasticsearch.common.io.stream.AbstractWriteableEnumTestCase; + +import java.io.IOException; + +import static org.elasticsearch.search.suggest.term.TermSuggestionBuilder.StringDistanceImpl; +import static org.hamcrest.Matchers.equalTo; + +/** + * Test for the {@link StringDistanceImpl} enum. 
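+ * Checks the ordinals, resolve(String) lookups, and stream serialization of each constant.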
+ */ +public class StringDistanceImplTests extends AbstractWriteableEnumTestCase { + + @Override + public void testValidOrdinals() { + assertThat(StringDistanceImpl.INTERNAL.ordinal(), equalTo(0)); + assertThat(StringDistanceImpl.DAMERAU_LEVENSHTEIN.ordinal(), equalTo(1)); + assertThat(StringDistanceImpl.LEVENSTEIN.ordinal(), equalTo(2)); + assertThat(StringDistanceImpl.JAROWINKLER.ordinal(), equalTo(3)); + assertThat(StringDistanceImpl.NGRAM.ordinal(), equalTo(4)); + } + + @Override + public void testFromString() { + assertThat(StringDistanceImpl.resolve("internal"), equalTo(StringDistanceImpl.INTERNAL)); + assertThat(StringDistanceImpl.resolve("damerau_levenshtein"), equalTo(StringDistanceImpl.DAMERAU_LEVENSHTEIN)); + assertThat(StringDistanceImpl.resolve("levenstein"), equalTo(StringDistanceImpl.LEVENSTEIN)); + assertThat(StringDistanceImpl.resolve("jarowinkler"), equalTo(StringDistanceImpl.JAROWINKLER)); + assertThat(StringDistanceImpl.resolve("ngram"), equalTo(StringDistanceImpl.NGRAM)); + final String doesntExist = "doesnt_exist"; + try { + StringDistanceImpl.resolve(doesntExist); + fail("StringDistanceImpl should not have an element " + doesntExist); + } catch (IllegalArgumentException e) { + } + try { + StringDistanceImpl.resolve(null); + fail("StringDistanceImpl.resolve on a null value should throw an exception."); + } catch (NullPointerException e) { + assertThat(e.getMessage(), equalTo("Input string is null")); + } + } + + @Override + public void testWriteTo() throws IOException { + assertWriteToStream(StringDistanceImpl.INTERNAL, 0); + assertWriteToStream(StringDistanceImpl.DAMERAU_LEVENSHTEIN, 1); + assertWriteToStream(StringDistanceImpl.LEVENSTEIN, 2); + assertWriteToStream(StringDistanceImpl.JAROWINKLER, 3); + assertWriteToStream(StringDistanceImpl.NGRAM, 4); + } + + @Override + public void testReadFrom() throws IOException { + assertReadFromStream(0, StringDistanceImpl.INTERNAL); + assertReadFromStream(1, StringDistanceImpl.DAMERAU_LEVENSHTEIN); + assertReadFromStream(2, StringDistanceImpl.LEVENSTEIN); + assertReadFromStream(3, StringDistanceImpl.JAROWINKLER); + assertReadFromStream(4, StringDistanceImpl.NGRAM); + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/term/SuggestModeTests.java b/core/src/test/java/org/elasticsearch/search/suggest/term/SuggestModeTests.java new file mode 100644 index 00000000000..ca2274ea4aa --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/term/SuggestModeTests.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.suggest.term; + +import org.elasticsearch.common.io.stream.AbstractWriteableEnumTestCase; + +import java.io.IOException; + +import static org.elasticsearch.search.suggest.term.TermSuggestionBuilder.SuggestMode; +import static org.hamcrest.Matchers.equalTo; + +/** + * Test the {@link SuggestMode} enum. + */ +public class SuggestModeTests extends AbstractWriteableEnumTestCase { + + @Override + public void testValidOrdinals() { + assertThat(SuggestMode.MISSING.ordinal(), equalTo(0)); + assertThat(SuggestMode.POPULAR.ordinal(), equalTo(1)); + assertThat(SuggestMode.ALWAYS.ordinal(), equalTo(2)); + } + + @Override + public void testFromString() { + assertThat(SuggestMode.resolve("missing"), equalTo(SuggestMode.MISSING)); + assertThat(SuggestMode.resolve("popular"), equalTo(SuggestMode.POPULAR)); + assertThat(SuggestMode.resolve("always"), equalTo(SuggestMode.ALWAYS)); + final String doesntExist = "doesnt_exist"; + try { + SuggestMode.resolve(doesntExist); + fail("SuggestMode should not have an element " + doesntExist); + } catch (IllegalArgumentException e) { + } + try { + SuggestMode.resolve(null); + fail("SuggestMode.resolve on a null value should throw an exception."); + } catch (NullPointerException e) { + assertThat(e.getMessage(), equalTo("Input string is null")); + } + } + + @Override + public void testWriteTo() throws IOException { + assertWriteToStream(SuggestMode.MISSING, 0); + assertWriteToStream(SuggestMode.POPULAR, 1); + assertWriteToStream(SuggestMode.ALWAYS, 2); + } + + @Override + public void testReadFrom() throws IOException { + assertReadFromStream(0, SuggestMode.MISSING); + assertReadFromStream(1, SuggestMode.POPULAR); + assertReadFromStream(2, SuggestMode.ALWAYS); + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilderTests.java b/core/src/test/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilderTests.java new file mode 100644 index 00000000000..5e910905d40 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilderTests.java @@ -0,0 +1,325 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.suggest.term; + +import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.elasticsearch.search.suggest.AbstractSuggestionBuilderTestCase; +import org.elasticsearch.search.suggest.DirectSpellcheckerSettings; +import org.elasticsearch.search.suggest.SortBy; +import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; +import org.elasticsearch.search.suggest.term.TermSuggestionBuilder.StringDistanceImpl; +import org.elasticsearch.search.suggest.term.TermSuggestionBuilder.SuggestMode; + +import java.io.IOException; +import java.util.Locale; + +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_ACCURACY; +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_MAX_EDITS; +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_MAX_INSPECTIONS; +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_MAX_TERM_FREQ; +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_MIN_DOC_FREQ; +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_MIN_WORD_LENGTH; +import static org.elasticsearch.search.suggest.DirectSpellcheckerSettings.DEFAULT_PREFIX_LENGTH; +import static org.hamcrest.Matchers.containsString; + +/** + * Test the {@link TermSuggestionBuilder} class. + */ +public class TermSuggestionBuilderTests extends AbstractSuggestionBuilderTestCase { + + /** + * creates random suggestion builder, renders it to xContent and back to new instance that should be equal to original + */ + @Override + protected TermSuggestionBuilder randomSuggestionBuilder() { + return randomTermSuggestionBuilder(); + } + + /** + * Creates a random TermSuggestionBuilder + */ + public static TermSuggestionBuilder randomTermSuggestionBuilder() { + TermSuggestionBuilder testBuilder = new TermSuggestionBuilder(randomAsciiOfLengthBetween(2, 20)); + setCommonPropertiesOnRandomBuilder(testBuilder); + maybeSet(testBuilder::suggestMode, randomSuggestMode()); + maybeSet(testBuilder::accuracy, randomFloat()); + maybeSet(testBuilder::sort, randomSort()); + maybeSet(testBuilder::stringDistance, randomStringDistance()); + maybeSet(testBuilder::maxEdits, randomIntBetween(1, 2)); + maybeSet(testBuilder::maxInspections, randomInt(Integer.MAX_VALUE)); + maybeSet(testBuilder::maxTermFreq, randomFloat()); + maybeSet(testBuilder::prefixLength, randomInt(Integer.MAX_VALUE)); + maybeSet(testBuilder::minWordLength, randomInt(Integer.MAX_VALUE)); + maybeSet(testBuilder::minDocFreq, randomFloat()); + return testBuilder; + } + + private static SuggestMode randomSuggestMode() { + final int randomVal = randomIntBetween(0, 2); + switch (randomVal) { + case 0: return SuggestMode.MISSING; + case 1: return SuggestMode.POPULAR; + case 2: return SuggestMode.ALWAYS; + default: throw new IllegalArgumentException("No suggest mode with an ordinal of " + randomVal); + } + } + + private static SortBy randomSort() { + int randomVal = randomIntBetween(0, 1); + switch (randomVal) { + case 0: return SortBy.SCORE; + case 1: return SortBy.FREQUENCY; + default: throw new IllegalArgumentException("No sort mode with an ordinal of " + randomVal); + } + } + + private static StringDistanceImpl randomStringDistance() { + int randomVal = randomIntBetween(0, 4); + switch (randomVal) { + case 0: return StringDistanceImpl.INTERNAL; + case 1: return 
StringDistanceImpl.DAMERAU_LEVENSHTEIN; + case 2: return StringDistanceImpl.LEVENSTEIN; + case 3: return StringDistanceImpl.JAROWINKLER; + case 4: return StringDistanceImpl.NGRAM; + default: throw new IllegalArgumentException("No string distance algorithm with an ordinal of " + randomVal); + } + } + + @Override + protected void mutateSpecificParameters(TermSuggestionBuilder builder) throws IOException { + switch (randomIntBetween(0, 9)) { + case 0: + builder.suggestMode(randomValueOtherThan(builder.suggestMode(), () -> randomSuggestMode())); + break; + case 1: + builder.accuracy(randomValueOtherThan(builder.accuracy(), () -> randomFloat())); + break; + case 2: + builder.sort(randomValueOtherThan(builder.sort(), () -> randomSort())); + break; + case 3: + builder.stringDistance(randomValueOtherThan(builder.stringDistance(), () -> randomStringDistance())); + break; + case 4: + builder.maxEdits(randomValueOtherThan(builder.maxEdits(), () -> randomIntBetween(1, 2))); + break; + case 5: + builder.maxInspections(randomValueOtherThan(builder.maxInspections(), () -> randomInt(Integer.MAX_VALUE))); + break; + case 6: + builder.maxTermFreq(randomValueOtherThan(builder.maxTermFreq(), () -> randomFloat())); + break; + case 7: + builder.prefixLength(randomValueOtherThan(builder.prefixLength(), () -> randomInt(Integer.MAX_VALUE))); + break; + case 8: + builder.minWordLength(randomValueOtherThan(builder.minWordLength(), () -> randomInt(Integer.MAX_VALUE))); + break; + case 9: + builder.minDocFreq(randomValueOtherThan(builder.minDocFreq(), () -> randomFloat())); + break; + default: + break; // do nothing + } + } + + public void testInvalidParameters() throws IOException { + // test missing field name + try { + new TermSuggestionBuilder(null); + fail("Should not allow null as field name"); + } catch (NullPointerException e) { + assertEquals("suggestion requires a field name", e.getMessage()); + } + + // test empty field name + try { + new TermSuggestionBuilder(""); + fail("Should not allow empty string as field name"); + } catch (IllegalArgumentException e) { + assertEquals("suggestion field name is empty", e.getMessage()); + } + + TermSuggestionBuilder builder = new TermSuggestionBuilder(randomAsciiOfLengthBetween(2, 20)); + // test invalid accuracy values + try { + builder.accuracy(-0.5f); + fail("Should not allow accuracy to be set to a negative value."); + } catch (IllegalArgumentException e) { + } + try { + builder.accuracy(1.1f); + fail("Should not allow accuracy to be greater than 1.0."); + } catch (IllegalArgumentException e) { + } + // test invalid max edit distance values + try { + builder.maxEdits(0); + fail("Should not allow maxEdits to be less than 1."); + } catch (IllegalArgumentException e) { + } + try { + builder.maxEdits(-1); + fail("Should not allow maxEdits to be a negative value."); + } catch (IllegalArgumentException e) { + } + try { + builder.maxEdits(3); + fail("Should not allow maxEdits to be greater than 2."); + } catch (IllegalArgumentException e) { + } + // test invalid max inspections values + try { + builder.maxInspections(-1); + fail("Should not allow maxInspections to be a negative value."); + } catch (IllegalArgumentException e) { + } + // test invalid max term freq values + try { + builder.maxTermFreq(-0.5f); + fail("Should not allow max term freq to be a negative value."); + } catch (IllegalArgumentException e) { + } + try { + builder.maxTermFreq(1.5f); + fail("If max term freq is greater than 1, it must be a whole number."); + } catch (IllegalArgumentException e) { + } +
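+ // a fractional value above 1 is rejected, but a whole number above 1 is accepted (treated as an absolute frequency rather than a fraction)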
try { + builder.maxTermFreq(2.0f); // this should be allowed + } catch (IllegalArgumentException e) { + fail("A max term freq greater than 1 that is a whole number should be allowed."); + } + // test invalid min doc freq values + try { + builder.minDocFreq(-0.5f); + fail("Should not allow min doc freq to be a negative value."); + } catch (IllegalArgumentException e) { + } + try { + builder.minDocFreq(1.5f); + fail("If min doc freq is greater than 1, it must be a whole number."); + } catch (IllegalArgumentException e) { + } + try { + builder.minDocFreq(2.0f); // this should be allowed + } catch (IllegalArgumentException e) { + fail("A min doc freq greater than 1 that is a whole number should be allowed."); + } + // test invalid min word length values + try { + builder.minWordLength(0); + fail("A min word length < 1 should not be allowed."); + } catch (IllegalArgumentException e) { + } + try { + builder.minWordLength(-1); + fail("Should not allow min word length to be a negative value."); + } catch (IllegalArgumentException e) { + } + // test invalid prefix length values + try { + builder.prefixLength(-1); + fail("Should not allow prefix length to be a negative value."); + } catch (IllegalArgumentException e) { + } + // test invalid size values + try { + builder.size(0); + fail("Size must be a positive value."); + } catch (IllegalArgumentException e) { + } + try { + builder.size(-1); + fail("Size must be a positive value."); + } catch (IllegalArgumentException e) { + } + // null values not allowed for enums + try { + builder.sort(null); + fail("Should not allow setting a null sort value."); + } catch (NullPointerException e) { + } + try { + builder.stringDistance(null); + fail("Should not allow setting a null string distance value."); + } catch (NullPointerException e) { + } + try { + builder.suggestMode(null); + fail("Should not allow setting a null suggest mode value."); + } catch (NullPointerException e) { + } + } + + public void testDefaultValuesSet() { + TermSuggestionBuilder builder = new TermSuggestionBuilder(randomAsciiOfLengthBetween(2, 20)); + assertEquals(DEFAULT_ACCURACY, builder.accuracy(), Float.MIN_VALUE); + assertEquals(DEFAULT_MAX_EDITS, builder.maxEdits()); + assertEquals(DEFAULT_MAX_INSPECTIONS, builder.maxInspections()); + assertEquals(DEFAULT_MAX_TERM_FREQ, builder.maxTermFreq(), Float.MIN_VALUE); + assertEquals(DEFAULT_MIN_DOC_FREQ, builder.minDocFreq(), Float.MIN_VALUE); + assertEquals(DEFAULT_MIN_WORD_LENGTH, builder.minWordLength()); + assertEquals(DEFAULT_PREFIX_LENGTH, builder.prefixLength()); + assertEquals(SortBy.SCORE, builder.sort()); + assertEquals(StringDistanceImpl.INTERNAL, builder.stringDistance()); + assertEquals(SuggestMode.MISSING, builder.suggestMode()); + } + + public void testMalformedJson() { + final String field = RandomStrings.randomAsciiOfLength(getRandom(), 10).toLowerCase(Locale.ROOT); + String suggest = "{\n" + + " \"bad-payload\" : {\n" + + " \"text\" : \"the amsterdma meetpu\",\n" + + " \"term\" : {\n" + + " \"field\" : { \"" + field + "\" : \"bad-object\" }\n" + + " }\n" + + " }\n" + + "}"; + try { + final SuggestBuilder suggestBuilder = SuggestBuilder.fromXContent(newParseContext(suggest), suggesters); + fail("Should not have been able to create SuggestBuilder from malformed JSON: " + suggestBuilder); + } catch (Exception e) { + assertThat(e.getMessage(), containsString("parsing failed")); + } + } + + private void assertSpellcheckerSettings(DirectSpellcheckerSettings oldSettings, DirectSpellcheckerSettings newSettings) { + final double 
delta = 0.0d; + // make sure the objects aren't the same + assertNotSame(oldSettings, newSettings); + // make sure the objects aren't null + assertNotNull(oldSettings); + assertNotNull(newSettings); + // and now, make sure they are equal.. + assertEquals(oldSettings.accuracy(), newSettings.accuracy(), delta); + assertEquals(oldSettings.maxEdits(), newSettings.maxEdits()); + assertEquals(oldSettings.maxInspections(), newSettings.maxInspections()); + assertEquals(oldSettings.maxTermFreq(), newSettings.maxTermFreq(), delta); + assertEquals(oldSettings.minDocFreq(), newSettings.minDocFreq(), delta); + assertEquals(oldSettings.minWordLength(), newSettings.minWordLength()); + assertEquals(oldSettings.prefixLength(), newSettings.prefixLength()); + assertEquals(oldSettings.sort(), newSettings.sort()); + assertEquals(oldSettings.stringDistance().getClass(), newSettings.stringDistance().getClass()); + assertEquals(oldSettings.suggestMode().getClass(), newSettings.suggestMode().getClass()); + } + +} diff --git a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 7e9bd14f9f3..43056d1fcb1 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -21,13 +21,13 @@ package org.elasticsearch.snapshots; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; @@ -137,6 +137,32 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { return null; } + public static void blockAllDataNodes(String repository) { + for(RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + ((MockRepository)repositoriesService.repository(repository)).blockOnDataFiles(true); + } + } + + public static void unblockAllDataNodes(String repository) { + for(RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + ((MockRepository)repositoriesService.repository(repository)).unblock(); + } + } + + public void waitForBlockOnAnyDataNode(String repository, TimeValue timeout) throws InterruptedException { + if (false == awaitBusy(() -> { + for(RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository); + if (mockRepository.blocked()) { + return true; + } + } + return false; + }, timeout.millis(), TimeUnit.MILLISECONDS)) { + fail("Timeout waiting for repository block on any data node!!!"); + } + } + public static void unblockNode(String node) { 
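+ // lifts the block on the MockRepository registered as "test-repo" on the given node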
((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository("test-repo")).unblock(); } diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index bd6c2533652..0acc32887ef 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -31,12 +31,12 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; @@ -325,7 +325,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest logger.info("--> execution was blocked on node [{}], shutting it down", blockedNode); unblockNode(blockedNode); - logger.info("--> stopping node", blockedNode); + logger.info("--> stopping node [{}]", blockedNode); stopNode(blockedNode); logger.info("--> waiting for completion"); SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(60)); @@ -379,7 +379,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest // Make sure that abort makes some progress Thread.sleep(100); unblockNode(blockedNode); - logger.info("--> stopping node", blockedNode); + logger.info("--> stopping node [{}]", blockedNode); stopNode(blockedNode); try { DeleteSnapshotResponse deleteSnapshotResponse = deleteSnapshotResponseFuture.actionGet(); @@ -632,8 +632,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest client().admin().cluster().preparePutRepository("test-repo") .setType("mock").setSettings(Settings.settingsBuilder() .put("location", randomRepoPath()) - .put("secret.mock.username", "notsecretusername") - .put("secret.mock.password", "verysecretpassword") + .put(MockRepository.Plugin.USERNAME_SETTING.getKey(), "notsecretusername") + .put(MockRepository.Plugin.PASSWORD_SETTING.getKey(), "verysecretpassword") ).get(); RestGetRepositoriesAction getRepoAction = internalCluster().getInstance(RestGetRepositoriesAction.class); diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 65337d4b632..cc52daad5b0 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -40,7 +40,6 @@ import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResp import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; -import 
org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.SnapshotsInProgress; @@ -52,6 +51,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.cluster.metadata.SnapshotId; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; @@ -761,7 +761,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas Client client = client(); Path repo = randomRepoPath(); - logger.info("--> creating repository at " + repo.toAbsolutePath()); + logger.info("--> creating repository at {}", repo.toAbsolutePath()); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", repo) @@ -817,7 +817,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas Client client = client(); Path repo = randomRepoPath(); - logger.info("--> creating repository at " + repo.toAbsolutePath()); + logger.info("--> creating repository at {}", repo.toAbsolutePath()); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", repo) @@ -855,7 +855,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas Client client = client(); Path repo = randomRepoPath(); - logger.info("--> creating repository at " + repo.toAbsolutePath()); + logger.info("--> creating repository at {}", repo.toAbsolutePath()); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", repo) @@ -889,7 +889,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas Client client = client(); Path repo = randomRepoPath(); - logger.info("--> creating repository at " + repo.toAbsolutePath()); + logger.info("--> creating repository at {}", repo.toAbsolutePath()); assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.settingsBuilder() .put("location", repo) @@ -1448,7 +1448,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas } } - logger.info("--> checking snapshot status for all currently running and snapshot with empty repository", blockedNode); + logger.info("--> checking snapshot status for all currently running and snapshot with empty repository"); response = client.admin().cluster().prepareSnapshotStatus().execute().actionGet(); assertThat(response.getSnapshots().size(), equalTo(1)); snapshotStatus = response.getSnapshots().get(0); @@ -1461,7 +1461,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas } } - logger.info("--> checking that _current returns the currently running snapshot", blockedNode); + logger.info("--> checking that _current returns the currently running snapshot"); GetSnapshotsResponse getResponse = client.admin().cluster().prepareGetSnapshots("test-repo").setCurrentSnapshot().execute().actionGet(); assertThat(getResponse.getSnapshots().size(), equalTo(1)); SnapshotInfo snapshotInfo = getResponse.getSnapshots().get(0); @@ -1475,7 +1475,7 @@ public 
class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas logger.info("--> done"); - logger.info("--> checking snapshot status again after snapshot is done", blockedNode); + logger.info("--> checking snapshot status again after snapshot is done"); response = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap").execute().actionGet(); snapshotStatus = response.getSnapshots().get(0); assertThat(snapshotStatus.getIndices().size(), equalTo(1)); @@ -1486,11 +1486,11 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertThat(indexStatus.getShardsStats().getDoneShards(), equalTo(snapshotInfo.successfulShards())); assertThat(indexStatus.getShards().size(), equalTo(snapshotInfo.totalShards())); - logger.info("--> checking snapshot status after it is done with empty repository", blockedNode); + logger.info("--> checking snapshot status after it is done with empty repository"); response = client.admin().cluster().prepareSnapshotStatus().execute().actionGet(); assertThat(response.getSnapshots().size(), equalTo(0)); - logger.info("--> checking that _current no longer returns the snapshot", blockedNode); + logger.info("--> checking that _current no longer returns the snapshot"); assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("_current").execute().actionGet().getSnapshots().isEmpty(), equalTo(true)); try { @@ -1813,19 +1813,31 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas } } - public void testDeleteIndexDuringSnapshot() throws Exception { + public void testCloseOrDeleteIndexDuringSnapshot() throws Exception { Client client = client(); boolean allowPartial = randomBoolean(); - logger.info("--> creating repository"); - assertAcked(client.admin().cluster().preparePutRepository("test-repo") + + // only block on repo init if we have a partial snapshot; otherwise we would run into a deadlock when acquiring shard locks for index deletion/closing + boolean initBlocking = allowPartial || randomBoolean(); + if (initBlocking) { + assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("mock").setSettings(Settings.settingsBuilder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put("block_on_init", true) + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put("block_on_init", true) )); + } else { + assertAcked(client.admin().cluster().preparePutRepository("test-repo") + .setType("mock").setSettings(Settings.settingsBuilder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put("block_on_data", true) + )); + } createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); @@ -1843,28 +1855,131 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas logger.info("--> snapshot allow partial {}", allowPartial); ListenableActionFuture future = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") - .setIndices("test-idx-*").setWaitForCompletion(true).setPartial(allowPartial).execute(); + .setIndices("test-idx-*").setWaitForCompletion(true).setPartial(allowPartial).execute(); logger.info("--> wait for block to kick in"); - waitForBlock(internalCluster().getMasterName(), "test-repo", TimeValue.timeValueMinutes(1)); -
logger.info("--> delete some indices while snapshot is running"); - client.admin().indices().prepareDelete("test-idx-1", "test-idx-2").get(); - logger.info("--> unblock running master node"); - unblockNode(internalCluster().getMasterName()); + if (initBlocking) { + waitForBlock(internalCluster().getMasterName(), "test-repo", TimeValue.timeValueMinutes(1)); + } else { + waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1)); + } + try { + if (allowPartial) { + // partial snapshots allow close / delete operations + if (randomBoolean()) { + logger.info("--> delete index while partial snapshot is running"); + client.admin().indices().prepareDelete("test-idx-1").get(); + } else { + logger.info("--> close index while partial snapshot is running"); + client.admin().indices().prepareClose("test-idx-1").get(); + } + } else { + // non-partial snapshots do not allow close / delete operations on indices where snapshot has not been completed + if (randomBoolean()) { + try { + logger.info("--> delete index while non-partial snapshot is running"); + client.admin().indices().prepareDelete("test-idx-1").get(); + fail("Expected deleting index to fail during snapshot"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("Cannot delete indices that are being snapshotted: [[test-idx-1/")); + } + } else { + try { + logger.info("--> close index while non-partial snapshot is running"); + client.admin().indices().prepareClose("test-idx-1").get(); + fail("Expected closing index to fail during snapshot"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("Cannot close indices that are being snapshotted: [[test-idx-1/")); + } + } + } + } finally { + if (initBlocking) { + logger.info("--> unblock running master node"); + unblockNode(internalCluster().getMasterName()); + } else { + logger.info("--> unblock all data nodes"); + unblockAllDataNodes("test-repo"); + } + } logger.info("--> waiting for snapshot to finish"); CreateSnapshotResponse createSnapshotResponse = future.get(); if (allowPartial) { - logger.info("Deleted index during snapshot, but allow partial"); + logger.info("Deleted/Closed index during snapshot, but allow partial"); assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo((SnapshotState.PARTIAL))); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().failedShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), lessThan(createSnapshotResponse.getSnapshotInfo().totalShards())); } else { - logger.info("Deleted index during snapshot and doesn't allow partial"); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo((SnapshotState.FAILED))); + logger.info("Snapshot successfully completed"); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo((SnapshotState.SUCCESS))); } } + public void testCloseIndexDuringRestore() throws Exception { + Client client = client(); + + logger.info("--> creating repository"); + assertAcked(client.admin().cluster().preparePutRepository("test-repo") + .setType("mock").setSettings(Settings.settingsBuilder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + )); + + createIndex("test-idx-1", "test-idx-2"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index("test-idx-1", "doc", 
Integer.toString(i), "foo", "bar" + i); + index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); + } + refresh(); + assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().totalHits(), equalTo(100L)); + assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().totalHits(), equalTo(100L)); + + logger.info("--> snapshot"); + assertThat(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setIndices("test-idx-*").setWaitForCompletion(true).get().getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + logger.info("--> deleting indices before restoring"); + assertAcked(client.admin().indices().prepareDelete("test-idx-*").get()); + + blockAllDataNodes("test-repo"); + logger.info("--> execution will be blocked on all data nodes"); + + final ListenableActionFuture restoreFut; + try { + logger.info("--> start restore"); + restoreFut = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .execute(); + + logger.info("--> waiting for block to kick in"); + waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1)); + + logger.info("--> close index while restore is running"); + try { + client.admin().indices().prepareClose("test-idx-1").get(); + fail("Expected closing index to fail during restore"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("Cannot close indices that are being restored: [[test-idx-1/")); + } + } finally { + // unblock even if the try block fails; otherwise we will get bogus failures when we delete all indices in test teardown. + logger.info("--> unblocking all data nodes"); + unblockAllDataNodes("test-repo"); + } + + logger.info("--> wait for restore to finish"); + RestoreSnapshotResponse restoreSnapshotResponse = restoreFut.get(); + logger.info("--> check that all shards were recovered"); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), greaterThan(0)); + assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); + } + public void testDeleteOrphanSnapshot() throws Exception { Client client = client(); @@ -1900,7 +2015,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas shards.put(new ShardId("test-idx", "_na_", 1), new ShardSnapshotStatus("unknown-node", State.ABORTED)); shards.put(new ShardId("test-idx", "_na_", 2), new ShardSnapshotStatus("unknown-node", State.ABORTED)); List entries = new ArrayList<>(); - entries.add(new Entry(new SnapshotId("test-repo", "test-snap"), true, State.ABORTED, Collections.singletonList("test-idx"), System.currentTimeMillis(), shards.build())); + entries.add(new Entry(new SnapshotId("test-repo", "test-snap"), true, false, State.ABORTED, Collections.singletonList("test-idx"), System.currentTimeMillis(), shards.build())); return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(Collections.unmodifiableList(entries))).build(); } @@ -2051,7 +2166,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas public void testListCorruptedSnapshot() throws Exception { Client client = client(); Path repo = randomRepoPath(); - logger.info("--> creating repository at " + repo.toAbsolutePath()); + logger.info("--> creating repository at {}", repo.toAbsolutePath()); assertAcked(client.admin().cluster().preparePutRepository("test-repo")
.setType("fs").setSettings(Settings.settingsBuilder() .put("location", repo) diff --git a/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityIT.java index eb069d4721c..b3f466cdcc8 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityIT.java @@ -215,7 +215,7 @@ public class SnapshotBackwardsCompatibilityIT extends ESBackcompatTestCase { logger.info("--> move from 0 to 1 replica"); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get(); } - logger.debug("---> repo exists: " + Files.exists(tempDir.resolve("indices/test/0")) + " files: " + Arrays.toString(FileSystemUtils.files(tempDir.resolve("indices/test/0")))); // it's only one shard! + logger.debug("---> repo exists: {} files: {}", Files.exists(tempDir.resolve("indices/test/0")), Arrays.toString(FileSystemUtils.files(tempDir.resolve("indices/test/0")))); // it's only one shard! CreateSnapshotResponse createSnapshotResponseSecond = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-1").setWaitForCompletion(true).setIndices("test").get(); assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseSecond.getSnapshotInfo().totalShards())); diff --git a/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index e92a28db86b..426dd2546dd 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -20,20 +20,19 @@ package org.elasticsearch.snapshots.mockstore; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.SnapshotId; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.env.Environment; import org.elasticsearch.index.snapshots.IndexShardRepository; @@ -49,8 +48,6 @@ import java.io.UnsupportedEncodingException; import java.nio.file.Path; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; -import java.util.Collection; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -63,6 +60,10 @@ public class MockRepository extends FsRepository { public 
static class Plugin extends org.elasticsearch.plugins.Plugin { + public static final Setting USERNAME_SETTING = Setting.simpleString("secret.mock.username", Property.NodeScope); + public static final Setting PASSWORD_SETTING = + Setting.simpleString("secret.mock.password", Property.NodeScope, Property.Filtered); + @Override public String name() { return "mock-repository"; @@ -78,8 +79,8 @@ public class MockRepository extends FsRepository { } public void onModule(SettingsModule module) { - module.registerSettingsFilter("secret.mock.password"); - + module.registerSetting(USERNAME_SETTING); + module.registerSetting(PASSWORD_SETTING); } } @@ -120,7 +121,7 @@ public class MockRepository extends FsRepository { blockOnInitialization = repositorySettings.settings().getAsBoolean("block_on_init", false); randomPrefix = repositorySettings.settings().get("random", "default"); waitAfterUnblock = repositorySettings.settings().getAsLong("wait_after_unblock", 0L); - logger.info("starting mock repository with random prefix " + randomPrefix); + logger.info("starting mock repository with random prefix {}", randomPrefix); mockBlobStore = new MockBlobStore(super.blobStore()); } @@ -176,14 +177,12 @@ public class MockRepository extends FsRepository { } public synchronized void unblockExecution() { - if (blocked) { - blocked = false; - // Clean blocking flags, so we wouldn't try to block again - blockOnDataFiles = false; - blockOnControlFiles = false; - blockOnInitialization = false; - this.notifyAll(); - } + blocked = false; + // Clean blocking flags, so we wouldn't try to block again + blockOnDataFiles = false; + blockOnControlFiles = false; + blockOnInitialization = false; + this.notifyAll(); } public boolean blocked() { diff --git a/core/src/test/java/org/elasticsearch/test/MockLogAppender.java b/core/src/test/java/org/elasticsearch/test/MockLogAppender.java index c0866a81081..9e4a881b25b 100644 --- a/core/src/test/java/org/elasticsearch/test/MockLogAppender.java +++ b/core/src/test/java/org/elasticsearch/test/MockLogAppender.java @@ -80,7 +80,7 @@ public class MockLogAppender extends AppenderSkeleton { protected final String logger; protected final Level level; protected final String message; - protected boolean saw; + volatile boolean saw; public AbstractEventExpectation(String name, String logger, Level level, String message) { this.name = name; diff --git a/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java b/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java index 427dce714e8..3193aaf458e 100644 --- a/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java +++ b/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java @@ -21,13 +21,11 @@ package org.elasticsearch.test; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingService; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.DiscoveryStats; -import org.elasticsearch.node.service.NodeService; public class NoopDiscovery implements Discovery { @@ -42,11 +40,6 @@ public class NoopDiscovery implements Discovery { return null; } - @Override - public void setNodeService(@Nullable NodeService nodeService) { - - } - @Override public void setRoutingService(RoutingService routingService) { diff 
--git a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java index 4cc7f8f8487..95984da55f6 100644 --- a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java +++ b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java @@ -20,12 +20,12 @@ package org.elasticsearch.test.geo; import com.carrotsearch.randomizedtesting.generators.RandomInts; -import com.spatial4j.core.context.jts.JtsSpatialContext; -import com.spatial4j.core.distance.DistanceUtils; -import com.spatial4j.core.exception.InvalidShapeException; -import com.spatial4j.core.shape.Point; -import com.spatial4j.core.shape.Rectangle; -import com.spatial4j.core.shape.impl.Range; +import org.locationtech.spatial4j.context.jts.JtsSpatialContext; +import org.locationtech.spatial4j.distance.DistanceUtils; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Point; +import org.locationtech.spatial4j.shape.Rectangle; +import org.locationtech.spatial4j.shape.impl.Range; import com.vividsolutions.jts.algorithm.ConvexHull; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; @@ -45,7 +45,7 @@ import org.junit.Assert; import java.util.Random; -import static com.spatial4j.core.shape.SpatialRelation.CONTAINS; +import static org.locationtech.spatial4j.shape.SpatialRelation.CONTAINS; /** * Random geoshape generation utilities for randomized {@code geo_shape} type testing diff --git a/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java index 3400f9637ff..5fff4a61f86 100644 --- a/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java +++ b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java @@ -19,12 +19,12 @@ package org.elasticsearch.test.hamcrest; -import com.spatial4j.core.shape.Shape; -import com.spatial4j.core.shape.ShapeCollection; -import com.spatial4j.core.shape.impl.GeoCircle; -import com.spatial4j.core.shape.impl.RectangleImpl; -import com.spatial4j.core.shape.jts.JtsGeometry; -import com.spatial4j.core.shape.jts.JtsPoint; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.ShapeCollection; +import org.locationtech.spatial4j.shape.impl.GeoCircle; +import org.locationtech.spatial4j.shape.impl.RectangleImpl; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; +import org.locationtech.spatial4j.shape.jts.JtsPoint; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.LineString; diff --git a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java index b18be91f575..25b0e06c4f1 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java @@ -70,7 +70,6 @@ public class ThreadPoolStatsTests extends ESTestCase { stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.WARMER, -1, 0, 0, 0, 0, 0L)); stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.GENERIC, -1, 0, 0, 0, 0, 0L)); stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.FORCE_MERGE, -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.PERCOLATE, -1, 0, 0, 0, 0, 0L)); 
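+ // the percolate thread pool was removed, so it no longer gets a stats entry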
stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SAME, -1, 0, 0, 0, 0, 0L)); @@ -103,7 +102,6 @@ public class ThreadPoolStatsTests extends ESTestCase { } assertThat(names, contains(ThreadPool.Names.FORCE_MERGE, ThreadPool.Names.GENERIC, - ThreadPool.Names.PERCOLATE, ThreadPool.Names.SAME, ThreadPool.Names.SEARCH, ThreadPool.Names.SUGGEST, diff --git a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index f65ce317d72..2a25b86bc83 100644 --- a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -56,11 +56,11 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { protected ThreadPool threadPool; - protected static final Version version0 = Version.fromId(/*0*/99); + protected static final Version version0 = Version.CURRENT.minimumCompatibilityVersion(); protected DiscoveryNode nodeA; protected MockTransportService serviceA; - protected static final Version version1 = Version.fromId(199); + protected static final Version version1 = Version.fromId(Version.CURRENT.id+1); protected DiscoveryNode nodeB; protected MockTransportService serviceB; @@ -137,8 +137,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { try { channel.sendResponse(new StringMessageResponse("hello " + request.message)); } catch (IOException e) { - e.printStackTrace(); - assertThat(e.getMessage(), false, equalTo(true)); + logger.error("Unexpected failure", e); + fail(e.getMessage()); } } }); @@ -162,8 +162,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @Override public void handleException(TransportException exp) { - exp.printStackTrace(); - assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true)); + logger.error("Unexpected failure", exp); + fail("got exception instead of a response: " + exp.getMessage()); } }); @@ -193,8 +193,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @Override public void handleException(TransportException exp) { - exp.printStackTrace(); - assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true)); + logger.error("Unexpected failure", exp); + fail("got exception instead of a response: " + exp.getMessage()); } }); @@ -218,7 +218,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { threadPool.getThreadContext().putHeader("test.pong.user", "pong_user"); channel.sendResponse(response); } catch (IOException e) { - assertThat(e.getMessage(), false, equalTo(true)); + logger.error("Unexpected failure", e); + fail(e.getMessage()); } }); final Object context = new Object(); @@ -245,7 +246,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @Override public void handleException(TransportException exp) { - assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true)); + logger.error("Unexpected failure", exp); + fail("got exception instead of a response: " + exp.getMessage()); } }; StringMessageRequest ping = new StringMessageRequest("ping"); @@ -317,8 +319,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { try { channel.sendResponse(TransportResponse.Empty.INSTANCE, TransportResponseOptions.builder().withCompress(true).build()); } catch (IOException e) { - 
e.printStackTrace(); - assertThat(e.getMessage(), false, equalTo(true)); + logger.error("Unexpected failure", e); + fail(e.getMessage()); } } }); @@ -341,8 +343,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @Override public void handleException(TransportException exp) { - exp.printStackTrace(); - assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true)); + logger.error("Unexpected failure", exp); + fail("got exception instead of a response: " + exp.getMessage()); } }); @@ -364,8 +366,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { try { channel.sendResponse(new StringMessageResponse("hello " + request.message), TransportResponseOptions.builder().withCompress(true).build()); } catch (IOException e) { - e.printStackTrace(); - assertThat(e.getMessage(), false, equalTo(true)); + logger.error("Unexpected failure", e); + fail(e.getMessage()); } } }); @@ -389,8 +391,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @Override public void handleException(TransportException exp) { - exp.printStackTrace(); - assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true)); + logger.error("Unexpected failure", exp); + fail("got exception instead of a response: " + exp.getMessage()); } }); @@ -540,20 +542,21 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } public void testTimeoutSendExceptionWithDelayedResponse() throws Exception { + CountDownLatch doneLatch = new CountDownLatch(1); serviceA.registerRequestHandler("sayHelloTimeoutDelayedResponse", StringMessageRequest::new, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) { TimeValue sleep = TimeValue.parseTimeValue(request.message, null, "sleep"); try { - Thread.sleep(sleep.millis()); + doneLatch.await(sleep.millis(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { // ignore } try { channel.sendResponse(new StringMessageResponse("hello " + request.message)); } catch (IOException e) { - e.printStackTrace(); - assertThat(e.getMessage(), false, equalTo(true)); + logger.error("Unexpected failure", e); + fail(e.getMessage()); } } }); @@ -613,7 +616,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @Override public void handleException(TransportException exp) { - exp.printStackTrace(); + logger.error("Unexpected failure", exp); fail("got exception instead of a response for " + counter + ": " + exp.getDetailedMessage()); } }); @@ -623,6 +626,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } serviceA.removeHandler("sayHelloTimeoutDelayedResponse"); + doneLatch.countDown(); } @TestLogging(value = "test. 
transport.tracer:TRACE") @@ -959,8 +963,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @Override public void handleException(TransportException exp) { - exp.printStackTrace(); - fail(); + logger.error("Unexpected failure", exp); + fail("got exception instead of a response: " + exp.getMessage()); } @Override @@ -1000,8 +1004,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @Override public void handleException(TransportException exp) { - exp.printStackTrace(); - fail(); + logger.error("Unexpected failure", exp); + fail("got exception instead of a response: " + exp.getMessage()); } @Override @@ -1044,8 +1048,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @Override public void handleException(TransportException exp) { - exp.printStackTrace(); - fail(); + logger.error("Unexpected failure", exp); + fail("got exception instead of a response: " + exp.getMessage()); } @Override @@ -1084,8 +1088,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @Override public void handleException(TransportException exp) { - exp.printStackTrace(); - fail(); + logger.error("Unexpected failure", exp); + fail("got exception instead of a response: " + exp.getMessage()); } @Override diff --git a/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java b/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java index cebd90ec08b..d587ab05e45 100644 --- a/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java +++ b/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java @@ -41,8 +41,8 @@ public class TransportModuleTests extends ModuleTestCase { static class FakeTransportService extends TransportService { @Inject - public FakeTransportService(Settings settings, Transport transport, ThreadPool threadPool, NamedWriteableRegistry namedWriteableRegistry) { - super(settings, transport, threadPool, namedWriteableRegistry); + public FakeTransportService(Settings settings, Transport transport, ThreadPool threadPool) { + super(settings, transport, threadPool); } } } diff --git a/core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java b/core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java index 9581dfff42f..f9451375590 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport.netty; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -29,9 +30,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; public class KeyedLockTests extends ESTestCase { @@ -68,28 +67,6 @@ public class KeyedLockTests extends ESTestCase { } } - public void testCannotAcquireTwoLocks() throws InterruptedException { - KeyedLock connectionLock = new KeyedLock(); - String name = randomRealisticUnicodeOfLength(scaledRandomIntBetween(10, 50)); - connectionLock.acquire(name); - try { - connectionLock.acquire(name); - fail("Expected IllegalStateException"); - } catch 
(IllegalStateException e) { - assertThat(e.getMessage(), containsString("Lock already acquired")); - } - } - - public void testCannotReleaseUnacquiredLock() throws InterruptedException { - KeyedLock connectionLock = new KeyedLock(); - String name = randomRealisticUnicodeOfLength(scaledRandomIntBetween(10, 50)); - try { - connectionLock.release(name); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("Lock not acquired")); - } - } public static class AcquireAndReleaseThread extends Thread { private CountDownLatch startLatch; @@ -117,16 +94,16 @@ public class KeyedLockTests extends ESTestCase { int numRuns = scaledRandomIntBetween(5000, 50000); for (int i = 0; i < numRuns; i++) { String curName = names[randomInt(names.length - 1)]; - connectionLock.acquire(curName); - try { + assert connectionLock.isHeldByCurrentThread(curName) == false; + try (Releasable ignored = connectionLock.acquire(curName)) { + assert connectionLock.isHeldByCurrentThread(curName); + assert connectionLock.isHeldByCurrentThread(curName + "bla") == false; Integer integer = counter.get(curName); if (integer == null) { counter.put(curName, 1); } else { counter.put(curName, integer.intValue() + 1); } - } finally { - connectionLock.release(curName); } AtomicInteger atomicInteger = new AtomicInteger(0); AtomicInteger value = safeCounter.putIfAbsent(curName, atomicInteger); diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java index 3d6ff1cd067..f8ffbd360c7 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java @@ -54,13 +54,13 @@ public class NettyScheduledPingTests extends ESTestCase { NamedWriteableRegistry registryA = new NamedWriteableRegistry(); final NettyTransport nettyA = new NettyTransport(settings, threadPool, new NetworkService(settings), BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT, registryA); - MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool, registryA); + MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool); serviceA.start(); serviceA.acceptIncomingRequests(); NamedWriteableRegistry registryB = new NamedWriteableRegistry(); final NettyTransport nettyB = new NettyTransport(settings, threadPool, new NetworkService(settings), BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT, registryB); - MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool, registryB); + MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool); serviceB.start(); serviceB.acceptIncomingRequests(); @@ -86,8 +86,8 @@ public class NettyScheduledPingTests extends ESTestCase { try { channel.sendResponse(TransportResponse.Empty.INSTANCE, TransportResponseOptions.EMPTY); } catch (IOException e) { - e.printStackTrace(); - assertThat(e.getMessage(), false, equalTo(true)); + logger.error("Unexpected failure", e); + fail(e.getMessage()); } } }); @@ -113,8 +113,8 @@ public class NettyScheduledPingTests extends ESTestCase { @Override public void handleException(TransportException exp) { - exp.printStackTrace(); - assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true)); + logger.error("Unexpected failure", exp); + fail("got exception instead of a response: " + 
exp.getMessage()); } }).txGet(); } diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java index ef408d16784..d9466d28424 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java @@ -157,7 +157,7 @@ public class NettyTransportIT extends ESIntegTestCase { try { transportChannel.sendResponse(e); } catch (IOException e1) { - logger.warn("Failed to send error message back to client for action [" + action + "]", e); + logger.warn("Failed to send error message back to client for action [{}]", e, action); logger.warn("Actual Exception", e1); } } @@ -194,7 +194,7 @@ public class NettyTransportIT extends ESIntegTestCase { try { transportChannel.sendResponse(e); } catch (Throwable e1) { - logger.warn("Failed to send error message back to client for action [" + reg.getAction() + "]", e1); + logger.warn("Failed to send error message back to client for action [{}]", e1, reg.getAction()); logger.warn("Actual Exception", e); } } } diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index 55a79ffddfc..7313d880a63 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -132,6 +132,9 @@ public class TribeIT extends ESIntegTestCase { Settings.Builder tribe1Defaults = Settings.builder(); Settings.Builder tribe2Defaults = Settings.builder(); for (Map.Entry entry : asMap.entrySet()) { + if (entry.getKey().startsWith("path.")) { + continue; + } tribe1Defaults.put("tribe.t1." + entry.getKey(), entry.getValue()); tribe2Defaults.put("tribe.t2." + entry.getKey(), entry.getValue()); } diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java b/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java new file mode 100644 index 00000000000..5174a317a40 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
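The test hunks above all apply one pattern: replace `e.printStackTrace()` and the opaque `assertThat(e.getMessage(), false, equalTo(true))` idiom with a logged exception followed by a descriptive `fail(...)`. A minimal, self-contained sketch of that pattern, with `java.util.logging` standing in for the internal `ESLogger` and `AssertionError` standing in for JUnit's `fail()` (both stand-ins are assumptions, chosen so the snippet runs without the Elasticsearch test classpath):

[source,java]
--------------------------------------------------
import java.util.logging.Level;
import java.util.logging.Logger;

public class HandleExceptionPattern {
    private static final Logger logger = Logger.getLogger("test");

    static void handleException(Exception exp) {
        // Route the full stack trace through the logger so it shows up in the
        // captured test logs instead of bypassing them via printStackTrace().
        logger.log(Level.SEVERE, "Unexpected failure", exp);
        // Fail with a descriptive message rather than asserting false == true,
        // which only reports "expected true but was false".
        throw new AssertionError("got exception instead of a response: " + exp.getMessage());
    }

    public static void main(String[] args) {
        try {
            handleException(new java.io.IOException("boom"));
        } catch (AssertionError e) {
            System.out.println(e.getMessage()); // got exception instead of a response: boom
        }
    }
}
--------------------------------------------------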
+ */ + +package org.elasticsearch.tribe; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +public class TribeServiceTests extends ESTestCase { + public void testMinimalSettings() { + Settings globalSettings = Settings.builder() + .put("node.name", "nodename") + .put("path.home", "some/path").build(); + Settings clientSettings = TribeService.buildClientSettings("tribe1", globalSettings, Settings.EMPTY); + assertEquals("some/path", clientSettings.get("path.home")); + assertEquals("nodename/tribe1", clientSettings.get("node.name")); + assertEquals("tribe1", clientSettings.get("tribe.name")); + assertEquals("false", clientSettings.get("http.enabled")); + assertEquals("true", clientSettings.get("node.client")); + assertEquals(5, clientSettings.getAsMap().size()); + } + + public void testEnvironmentSettings() { + Settings globalSettings = Settings.builder() + .put("node.name", "nodename") + .put("path.home", "some/path") + .put("path.conf", "conf/path") + .put("path.plugins", "plugins/path") + .put("path.scripts", "scripts/path") + .put("path.logs", "logs/path").build(); + Settings clientSettings = TribeService.buildClientSettings("tribe1", globalSettings, Settings.EMPTY); + assertEquals("some/path", clientSettings.get("path.home")); + assertEquals("conf/path", clientSettings.get("path.conf")); + assertEquals("plugins/path", clientSettings.get("path.plugins")); + assertEquals("scripts/path", clientSettings.get("path.scripts")); + assertEquals("logs/path", clientSettings.get("path.logs")); + + Settings tribeSettings = Settings.builder() + .put("path.home", "alternate/path").build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + TribeService.buildClientSettings("tribe1", globalSettings, tribeSettings); + }); + assertTrue(e.getMessage(), e.getMessage().contains("Setting [path.home] not allowed in tribe client")); + } + + public void testPassthroughSettings() { + Settings globalSettings = Settings.builder() + .put("node.name", "nodename") + .put("path.home", "some/path") + .put("network.host", "0.0.0.0") + .put("network.bind_host", "1.1.1.1") + .put("network.publish_host", "2.2.2.2") + .put("transport.host", "3.3.3.3") + .put("transport.bind_host", "4.4.4.4") + .put("transport.publish_host", "5.5.5.5").build(); + Settings clientSettings = TribeService.buildClientSettings("tribe1", globalSettings, Settings.EMPTY); + assertEquals("0.0.0.0", clientSettings.get("network.host")); + assertEquals("1.1.1.1", clientSettings.get("network.bind_host")); + assertEquals("2.2.2.2", clientSettings.get("network.publish_host")); + assertEquals("3.3.3.3", clientSettings.get("transport.host")); + assertEquals("4.4.4.4", clientSettings.get("transport.bind_host")); + assertEquals("5.5.5.5", clientSettings.get("transport.publish_host")); + + // per tribe client overrides still work + Settings tribeSettings = Settings.builder() + .put("network.host", "3.3.3.3") + .put("network.bind_host", "4.4.4.4") + .put("network.publish_host", "5.5.5.5") + .put("transport.host", "6.6.6.6") + .put("transport.bind_host", "7.7.7.7") + .put("transport.publish_host", "8.8.8.8").build(); + clientSettings = TribeService.buildClientSettings("tribe1", globalSettings, tribeSettings); + assertEquals("3.3.3.3", clientSettings.get("network.host")); + assertEquals("4.4.4.4", clientSettings.get("network.bind_host")); + assertEquals("5.5.5.5", clientSettings.get("network.publish_host")); + assertEquals("6.6.6.6", clientSettings.get("transport.host")); + 
assertEquals("7.7.7.7", clientSettings.get("transport.bind_host")); + assertEquals("8.8.8.8", clientSettings.get("transport.publish_host")); + } +} diff --git a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index 5e9bc80b9a9..5fc24094bd3 100644 --- a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -243,9 +243,9 @@ public class SimpleValidateQueryIT extends ESIntegTestCase { // fuzzy queries assertExplanation(QueryBuilders.fuzzyQuery("field", "the").fuzziness(Fuzziness.fromEdits(2)), - containsString("field:the field:tree^0.3333333"), true); + containsString("field:the (field:tree)^0.3333333"), true); assertExplanation(QueryBuilders.fuzzyQuery("field", "jump"), - containsString("field:jumps^0.75"), true); + containsString("(field:jumps)^0.75"), true); // more like this queries assertExplanation(QueryBuilders.moreLikeThisQuery(new String[] { "field" }, null, MoreLikeThisQueryBuilder.ids("1")) diff --git a/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java b/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java index bb8636d36e9..6b69a038a00 100644 --- a/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java +++ b/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java @@ -54,7 +54,7 @@ public class ConcurrentDocumentOperationIT extends ESIntegTestCase { @Override public void onFailure(Throwable e) { - e.printStackTrace(); + logger.error("Unexpected exception while indexing", e); failure.set(e); latch.countDown(); } diff --git a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java index 3432411b225..4e319c65bac 100644 --- a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -354,9 +354,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { switch (random.nextInt(6)) { case 0: // random simple - if (VERBOSE) { - System.out.println("TEST: use random simple ids"); - } + logger.info("--> use random simple ids"); ids = new IDSource() { @Override public String next() { @@ -366,9 +364,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { break; case 1: // random realistic unicode - if (VERBOSE) { - System.out.println("TEST: use random realistic unicode ids"); - } + logger.info("--> use random realistic unicode ids"); ids = new IDSource() { @Override public String next() { @@ -378,9 +374,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { break; case 2: // sequential - if (VERBOSE) { - System.out.println("TEST: use seuquential ids"); - } + logger.info("--> use sequential ids"); ids = new IDSource() { int upto; @@ -392,9 +386,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { break; case 3: // zero-pad sequential - if (VERBOSE) { - System.out.println("TEST: use zero-pad seuquential ids"); - } + logger.info("--> use zero-padded sequential ids"); ids = new IDSource() { final int radix = TestUtil.nextInt(random, Character.MIN_RADIX, Character.MAX_RADIX); final String zeroPad = String.format(Locale.ROOT, "%0" + TestUtil.nextInt(random, 4, 20) + "d", 0); @@ -409,9 +401,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { break; case 4: // random 
long - if (VERBOSE) { - System.out.println("TEST: use random long ids"); - } + logger.info("--> use random long ids"); ids = new IDSource() { final int radix = TestUtil.nextInt(random, Character.MIN_RADIX, Character.MAX_RADIX); int upto; @@ -424,9 +414,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { break; case 5: // zero-pad random long - if (VERBOSE) { - System.out.println("TEST: use zero-pad random long ids"); - } + logger.info("--> use zero-padded random long ids"); ids = new IDSource() { final int radix = TestUtil.nextInt(random, Character.MIN_RADIX, Character.MAX_RADIX); final String zeroPad = String.format(Locale.ROOT, "%015d", 0); @@ -539,9 +527,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { idPrefix = ""; } else { idPrefix = TestUtil.randomSimpleString(random); - if (VERBOSE) { - System.out.println("TEST: use id prefix: " + idPrefix); - } + logger.debug("--> use id prefix {}", idPrefix); } int numIDs; @@ -564,9 +550,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { final IDAndVersion[] idVersions = new IDAndVersion[TestUtil.nextInt(random, numIDs / 2, numIDs * (TEST_NIGHTLY ? 8 : 2))]; final Map truth = new HashMap<>(); - if (VERBOSE) { - System.out.println("TEST: use " + numIDs + " ids; " + idVersions.length + " operations"); - } + logger.debug("--> use {} ids; {} operations", numIDs, idVersions.length); for (int i = 0; i < idVersions.length; i++) { @@ -596,10 +580,9 @@ public class SimpleVersioningIT extends ESIntegTestCase { idVersions[i] = x; } - if (VERBOSE) { - for (IDAndVersion idVersion : idVersions) { - System.out.println("id=" + idVersion.id + " version=" + idVersion.version + " delete?=" + idVersion.delete + " truth?=" + (truth.get(idVersion.id) == idVersion)); - } + for (IDAndVersion idVersion : idVersions) { + logger.debug("--> id={} version={} delete?={} truth?={}", idVersion.id, idVersion.version, idVersion.delete, + truth.get(idVersion.id) == idVersion); } final AtomicInteger upto = new AtomicInteger(); @@ -623,8 +606,8 @@ public class SimpleVersioningIT extends ESIntegTestCase { if (index >= idVersions.length) { break; } - if (VERBOSE && index % 100 == 0) { - System.out.println(Thread.currentThread().getName() + ": index=" + index); + if (index % 100 == 0) { + logger.trace("{}: index={}", Thread.currentThread().getName(), index); } IDAndVersion idVersion = idVersions[index]; @@ -657,18 +640,18 @@ public class SimpleVersioningIT extends ESIntegTestCase { idVersion.indexFinishTime = System.nanoTime() - startTime; if (threadRandom.nextInt(100) == 7) { - System.out.println(threadID + ": TEST: now refresh at " + (System.nanoTime() - startTime)); + logger.trace("--> {}: TEST: now refresh at {}", threadID, System.nanoTime() - startTime); refresh(); - System.out.println(threadID + ": TEST: refresh done at " + (System.nanoTime() - startTime)); + logger.trace("--> {}: TEST: refresh done at {}", threadID, System.nanoTime() - startTime); } if (threadRandom.nextInt(100) == 7) { - System.out.println(threadID + ": TEST: now flush at " + (System.nanoTime() - startTime)); + logger.trace("--> {}: TEST: now flush at {}", threadID, System.nanoTime() - startTime); try { flush(); } catch (FlushNotAllowedEngineException fnaee) { // OK } - System.out.println(threadID + ": TEST: flush done at " + (System.nanoTime() - startTime)); + logger.trace("--> {}: TEST: flush done at {}", threadID, System.nanoTime() - startTime); } } } catch (Exception e) { @@ -696,16 +679,17 @@ public class SimpleVersioningIT extends ESIntegTestCase { } long actualVersion 
= client().prepareGet("test", "type", id).execute().actionGet().getVersion(); if (actualVersion != expected) { - System.out.println("FAILED: idVersion=" + idVersion + " actualVersion=" + actualVersion); + logger.error("--> FAILED: idVersion={} actualVersion={}", idVersion, actualVersion); failed = true; } } if (failed) { - System.out.println("All versions:"); + StringBuilder sb = new StringBuilder(); for (int i = 0; i < idVersions.length; i++) { - System.out.println("i=" + i + " " + idVersions[i]); + sb.append("i=").append(i).append(" ").append(idVersions[i]).append(System.lineSeparator()); } + logger.error("All versions: {}", sb); fail("wrong versions for some IDs"); } } diff --git a/core/src/test/resources/indices/bwc/index-2.2.0.zip b/core/src/test/resources/indices/bwc/index-2.2.0.zip index b645084eeef..797ca24f4ed 100644 Binary files a/core/src/test/resources/indices/bwc/index-2.2.0.zip and b/core/src/test/resources/indices/bwc/index-2.2.0.zip differ diff --git a/core/src/test/resources/indices/bwc/index-2.2.1.zip b/core/src/test/resources/indices/bwc/index-2.2.1.zip new file mode 100644 index 00000000000..8d8e55ae62d Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-2.2.1.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.2.0.zip b/core/src/test/resources/indices/bwc/repo-2.2.0.zip index f895e11fdd7..f2208b734c0 100644 Binary files a/core/src/test/resources/indices/bwc/repo-2.2.0.zip and b/core/src/test/resources/indices/bwc/repo-2.2.0.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.2.1.zip b/core/src/test/resources/indices/bwc/repo-2.2.1.zip new file mode 100644 index 00000000000..35f7425b20e Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-2.2.1.zip differ diff --git a/core/src/test/resources/org/elasticsearch/common/logging/config/logging.yml b/core/src/test/resources/org/elasticsearch/common/logging/config/logging.yml index bd7a15f4434..515e4320fd2 100644 --- a/core/src/test/resources/org/elasticsearch/common/logging/config/logging.yml +++ b/core/src/test/resources/org/elasticsearch/common/logging/config/logging.yml @@ -1,4 +1,4 @@ -# you can override this using by setting a system property, for example -Des.logger.level=DEBUG +# you can override this by setting a system property, for example -Ees.logger.level=DEBUG es.logger.level: INFO rootLogger: ${es.logger.level}, console logger: diff --git a/core/src/test/resources/org/elasticsearch/index/analysis/keep_analysis.json b/core/src/test/resources/org/elasticsearch/index/analysis/keep_analysis.json index 233d6f3e3d7..0ed95e16332 100644 --- a/core/src/test/resources/org/elasticsearch/index/analysis/keep_analysis.json +++ b/core/src/test/resources/org/elasticsearch/index/analysis/keep_analysis.json @@ -9,9 +9,7 @@ }, "my_case_sensitive_keep_filter":{ "type":"keep", - "keep_words" : ["Hello", "worlD"], - "enable_position_increments" : false, - "version" : "4.2" + "keep_words" : ["Hello", "worlD"] } } } diff --git a/core/src/test/resources/org/elasticsearch/index/mapper/all/mapping.json b/core/src/test/resources/org/elasticsearch/index/mapper/all/mapping.json index e0a7bfeba89..80fb091b973 100644 --- a/core/src/test/resources/org/elasticsearch/index/mapper/all/mapping.json +++ b/core/src/test/resources/org/elasticsearch/index/mapper/all/mapping.json @@ -2,7 +2,7 @@ "person":{ "_all":{ "enabled":true, - "omit_norms":true + "norms":false }, "properties":{ "name":{ diff --git
a/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_create_index.json b/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_create_index.json index e9604ae458f..67b03178c82 100644 --- a/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_create_index.json +++ b/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_create_index.json @@ -7,7 +7,7 @@ "store_term_vector_offsets": true, "store_term_vector_positions": true, "store_term_vector_payloads": true, - "omit_norms": true, + "norms": false, "analyzer": "standard", "search_analyzer": "whitespace", "similarity": "my_similarity", diff --git a/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json b/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json index 6ddde341fc2..6164c3f5ca8 100644 --- a/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json +++ b/core/src/test/resources/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json @@ -7,7 +7,7 @@ "store_term_vector_offsets": false, "store_term_vector_positions": false, "store_term_vector_payloads": false, - "omit_norms": false, + "norms": true, "analyzer": "whitespace", "search_analyzer": "standard", "similarity": "BM25", diff --git a/dev-tools/create_bwc_index.py b/dev-tools/create_bwc_index.py index c8ade7f866a..361934908ec 100644 --- a/dev-tools/create_bwc_index.py +++ b/dev-tools/create_bwc_index.py @@ -247,6 +247,25 @@ def generate_index(client, version, index_name): } } + mappings['norms'] = { + 'properties': { + 'string_with_norms_disabled': { + 'type': 'string', + 'norms': { + 'enabled': False + } + }, + 'string_with_norms_enabled': { + 'type': 'string', + 'index': 'not_analyzed', + 'norms': { + 'enabled': True, + 'loading': 'eager' + } + } + } + } + mappings['doc'] = { 'properties': { 'string': { diff --git a/dev-tools/es_release_notes.pl b/dev-tools/es_release_notes.pl index c3e93f91076..1a7565bfc56 100755 --- a/dev-tools/es_release_notes.pl +++ b/dev-tools/es_release_notes.pl @@ -29,7 +29,7 @@ my $Issue_URL = "http://github.com/${User_Repo}issues/"; my @Groups = qw( breaking deprecation feature - enhancement bug regression upgrade build doc test + enhancement bug regression upgrade non-issue build docs test ); my %Group_Labels = ( breaking => 'Breaking changes', @@ -42,6 +42,7 @@ my %Group_Labels = ( regression => 'Regressions', test => 'Tests', upgrade => 'Upgrades', + "non-issue" => 'Non-issue', other => 'NOT CLASSIFIED', ); @@ -157,6 +158,8 @@ sub fetch_issues { ISSUE: for my $issue (@issues) { next if $seen{ $issue->{number} } && !$issue->{pull_request}; + # uncomment for including/excluding PRs already issued in other versions + # next if grep {$_->{name}=~/^v2/} @{$issue->{labels}}; my %labels = map { $_->{name} => 1 } @{ $issue->{labels} }; my ($header) = map { substr( $_, 1 ) } grep {/^:/} keys %labels; $header ||= 'NOT CLASSIFIED'; diff --git a/distribution/build.gradle b/distribution/build.gradle index d70f0254f3b..b9b2784a5b3 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -34,7 +34,7 @@ buildscript { } } dependencies { - classpath 'com.netflix.nebula:gradle-ospackage-plugin:3.1.0' + classpath 'com.netflix.nebula:gradle-ospackage-plugin:3.4.0' } } @@ -337,21 +337,19 @@ configure(subprojects.findAll { ['deb', 'rpm'].contains(it.name) }) { /** * Suck up all the empty directories that we need to 
install into the path. */ - Closure suckUpEmptyDirectories = { path -> + Closure suckUpEmptyDirectories = { path, u, g -> into(path) { from "${packagingFiles}/${path}" includeEmptyDirs true createDirectoryEntry true - /* All of these empty directories have this ownership. We're just - lucky! */ - user 'elasticsearch' - permissionGroup 'elasticsearch' + user u + permissionGroup g } } - suckUpEmptyDirectories('/var/run') - suckUpEmptyDirectories('/var/log') - suckUpEmptyDirectories('/var/lib') - suckUpEmptyDirectories('/usr/share/elasticsearch') + suckUpEmptyDirectories('/var/run', 'elasticsearch', 'elasticsearch') + suckUpEmptyDirectories('/var/log', 'elasticsearch', 'elasticsearch') + suckUpEmptyDirectories('/var/lib', 'elasticsearch', 'elasticsearch') + suckUpEmptyDirectories('/usr/share/elasticsearch', 'root', 'root') } } diff --git a/distribution/deb/src/main/packaging/init.d/elasticsearch b/distribution/deb/src/main/packaging/init.d/elasticsearch index be45c45d192..1476a520c1d 100755 --- a/distribution/deb/src/main/packaging/init.d/elasticsearch +++ b/distribution/deb/src/main/packaging/init.d/elasticsearch @@ -99,7 +99,7 @@ fi # Define other required variables PID_FILE="$PID_DIR/$NAME.pid" DAEMON=$ES_HOME/bin/elasticsearch -DAEMON_OPTS="-d -p $PID_FILE --default.path.home=$ES_HOME --default.path.logs=$LOG_DIR --default.path.data=$DATA_DIR --default.path.conf=$CONF_DIR" +DAEMON_OPTS="-d -p $PID_FILE -Ees.default.path.logs=$LOG_DIR -Ees.default.path.data=$DATA_DIR -Ees.default.path.conf=$CONF_DIR" export ES_HEAP_SIZE export ES_HEAP_NEWSIZE @@ -107,6 +107,7 @@ export ES_DIRECT_SIZE export ES_JAVA_OPTS export ES_GC_LOG_FILE export JAVA_HOME +export ES_INCLUDE # Check DAEMON exists test -x $DAEMON || exit 0 diff --git a/distribution/licenses/commons-cli-1.3.1.jar.sha1 b/distribution/licenses/commons-cli-1.3.1.jar.sha1 deleted file mode 100644 index fc366d027f5..00000000000 --- a/distribution/licenses/commons-cli-1.3.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1303efbc4b181e5a58bf2e967dc156a3132b97c0 diff --git a/distribution/licenses/jopt-simple-4.9.jar.sha1 b/distribution/licenses/jopt-simple-4.9.jar.sha1 new file mode 100644 index 00000000000..b86fa62ac20 --- /dev/null +++ b/distribution/licenses/jopt-simple-4.9.jar.sha1 @@ -0,0 +1 @@ +ee9e9eaa0a35360dcfeac129ff4923215fd65904 \ No newline at end of file diff --git a/distribution/licenses/jopt-simple-LICENSE.txt b/distribution/licenses/jopt-simple-LICENSE.txt new file mode 100644 index 00000000000..85f923a9526 --- /dev/null +++ b/distribution/licenses/jopt-simple-LICENSE.txt @@ -0,0 +1,24 @@ +/* + The MIT License + + Copyright (c) 2004-2015 Paul R. Holser, Jr. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ diff --git a/distribution/licenses/jopt-simple-NOTICE.txt b/distribution/licenses/jopt-simple-NOTICE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/distribution/licenses/lucene-analyzers-common-5.5.0.jar.sha1 b/distribution/licenses/lucene-analyzers-common-5.5.0.jar.sha1 deleted file mode 100644 index dcdeb2cb477..00000000000 --- a/distribution/licenses/lucene-analyzers-common-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1e0e8243a4410be20c34683034fafa7bb52e55cc \ No newline at end of file diff --git a/distribution/licenses/lucene-analyzers-common-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-analyzers-common-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..2ed6eb6ef56 --- /dev/null +++ b/distribution/licenses/lucene-analyzers-common-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +cd2388adc4b33c7530bbb8cd386e5c8c5c8e6aca \ No newline at end of file diff --git a/distribution/licenses/lucene-backward-codecs-5.5.0.jar.sha1 b/distribution/licenses/lucene-backward-codecs-5.5.0.jar.sha1 deleted file mode 100644 index dd5c846363a..00000000000 --- a/distribution/licenses/lucene-backward-codecs-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68480974b2f54f519763632a7c1c5d51cbff3805 \ No newline at end of file diff --git a/distribution/licenses/lucene-backward-codecs-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-backward-codecs-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..28cdb1db9b1 --- /dev/null +++ b/distribution/licenses/lucene-backward-codecs-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +f5bbdd01b98fab7c18b46e762de3e39221b0c8fc \ No newline at end of file diff --git a/distribution/licenses/lucene-core-5.5.0.jar.sha1 b/distribution/licenses/lucene-core-5.5.0.jar.sha1 deleted file mode 100644 index 70bd0b63bba..00000000000 --- a/distribution/licenses/lucene-core-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a74fd869bb5ad7fe6b4cd29df9543a34aea81164 \ No newline at end of file diff --git a/distribution/licenses/lucene-core-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-core-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..c304106975b --- /dev/null +++ b/distribution/licenses/lucene-core-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +18ad74518b34af7cfbd6c1e3a408920ff7665501 \ No newline at end of file diff --git a/distribution/licenses/lucene-grouping-5.5.0.jar.sha1 b/distribution/licenses/lucene-grouping-5.5.0.jar.sha1 deleted file mode 100644 index f905a2081b6..00000000000 --- a/distribution/licenses/lucene-grouping-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -437cacec0cfa349b1dee049a7c0e32df3b8ecc07 \ No newline at end of file diff --git a/distribution/licenses/lucene-grouping-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-grouping-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..a95cc29cc7d --- /dev/null +++ b/distribution/licenses/lucene-grouping-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +dc0b211e31b8f1e0ee3a9e8f9c71b13fa088dabf \ No newline at end of file diff --git a/distribution/licenses/lucene-highlighter-5.5.0.jar.sha1 b/distribution/licenses/lucene-highlighter-5.5.0.jar.sha1 deleted file mode 100644 index 6ea3c5a0c13..00000000000 --- 
a/distribution/licenses/lucene-highlighter-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ecdd913cb7c61a5435591f0a7268b01ab3fc782a \ No newline at end of file diff --git a/distribution/licenses/lucene-highlighter-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-highlighter-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..8f57bb02639 --- /dev/null +++ b/distribution/licenses/lucene-highlighter-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +bbd503396c08546f1b9e023e77dbf25bbb052d1c \ No newline at end of file diff --git a/distribution/licenses/lucene-join-5.5.0.jar.sha1 b/distribution/licenses/lucene-join-5.5.0.jar.sha1 deleted file mode 100644 index 3cc19b170ed..00000000000 --- a/distribution/licenses/lucene-join-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af4f55e36e3a7d1f4e9ed9efdccf7e22b767d6e8 \ No newline at end of file diff --git a/distribution/licenses/lucene-join-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-join-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..835bac49233 --- /dev/null +++ b/distribution/licenses/lucene-join-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +96fd93d4a4192c42b0d56198b73a25440d4db2f7 \ No newline at end of file diff --git a/distribution/licenses/lucene-memory-5.5.0.jar.sha1 b/distribution/licenses/lucene-memory-5.5.0.jar.sha1 deleted file mode 100644 index 1f4ebc783ee..00000000000 --- a/distribution/licenses/lucene-memory-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09a327fe9f20fc7e3912ed213bdd5cb4b6d2a65a \ No newline at end of file diff --git a/distribution/licenses/lucene-memory-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-memory-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..1e392d3e246 --- /dev/null +++ b/distribution/licenses/lucene-memory-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +ddd44a319d201ff73cd25be139bd3175226ab5a5 \ No newline at end of file diff --git a/distribution/licenses/lucene-misc-5.5.0.jar.sha1 b/distribution/licenses/lucene-misc-5.5.0.jar.sha1 deleted file mode 100644 index 76131ae81c5..00000000000 --- a/distribution/licenses/lucene-misc-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -504d855a1a38190622fdf990b2298c067e7d60ca \ No newline at end of file diff --git a/distribution/licenses/lucene-misc-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-misc-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..a21aaef33f5 --- /dev/null +++ b/distribution/licenses/lucene-misc-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +07d943ecdc552632bdca8f2772fd081a02cbf589 \ No newline at end of file diff --git a/distribution/licenses/lucene-queries-5.5.0.jar.sha1 b/distribution/licenses/lucene-queries-5.5.0.jar.sha1 deleted file mode 100644 index 5790b2e4776..00000000000 --- a/distribution/licenses/lucene-queries-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -60ca161c1dd5f127907423b6f039b846fb713de0 \ No newline at end of file diff --git a/distribution/licenses/lucene-queries-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-queries-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..57fb022de53 --- /dev/null +++ b/distribution/licenses/lucene-queries-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +66c72fd979f54480af75d01719ef25da62c0a8b6 \ No newline at end of file diff --git a/distribution/licenses/lucene-queryparser-5.5.0.jar.sha1 b/distribution/licenses/lucene-queryparser-5.5.0.jar.sha1 deleted file mode 100644 index 8e4a1e66138..00000000000 --- 
a/distribution/licenses/lucene-queryparser-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0fddc49725b562fd48dff0cff004336ad2a090a4 \ No newline at end of file diff --git a/distribution/licenses/lucene-queryparser-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-queryparser-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..5c311c4bd9b --- /dev/null +++ b/distribution/licenses/lucene-queryparser-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +8992204f922fe52af557e691cbfb4c54f92b76bd \ No newline at end of file diff --git a/distribution/licenses/lucene-sandbox-5.5.0.jar.sha1 b/distribution/licenses/lucene-sandbox-5.5.0.jar.sha1 deleted file mode 100644 index 20c2a1c9527..00000000000 --- a/distribution/licenses/lucene-sandbox-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b7da8e187acd6e4d7781ba41fac8b9082dd27409 \ No newline at end of file diff --git a/distribution/licenses/lucene-sandbox-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-sandbox-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..20f0037ea31 --- /dev/null +++ b/distribution/licenses/lucene-sandbox-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +8565264e00bc43e1226ff0d2e986dbb26d353ce2 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-5.5.0.jar.sha1 b/distribution/licenses/lucene-spatial-5.5.0.jar.sha1 deleted file mode 100644 index dd645be87e3..00000000000 --- a/distribution/licenses/lucene-spatial-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c14965bf67179bee93cc8efc58d09a75d230c891 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-spatial-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..6a909857945 --- /dev/null +++ b/distribution/licenses/lucene-spatial-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +98fc1bb7e005f33c388be66486341ad8168b72eb \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-extras-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-spatial-extras-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..b741ccd62a7 --- /dev/null +++ b/distribution/licenses/lucene-spatial-extras-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +b5b651b0adbc2f404e091817282dabd7b432c677 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial3d-5.5.0.jar.sha1 b/distribution/licenses/lucene-spatial3d-5.5.0.jar.sha1 deleted file mode 100644 index c0b9d4ba838..00000000000 --- a/distribution/licenses/lucene-spatial3d-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e5ab4ea3e2052166100482f7a56b75bfa4ab0ad \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial3d-6.0.0-snapshot-f0aa4fc.jar.sha1 b/distribution/licenses/lucene-spatial3d-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..512e4b7b592 --- /dev/null +++ b/distribution/licenses/lucene-spatial3d-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +334e194bf83c75f0ae165e3e72b6fa35c5d636c5 \ No newline at end of file diff --git a/distribution/licenses/lucene-suggest-5.5.0.jar.sha1 b/distribution/licenses/lucene-suggest-5.5.0.jar.sha1 deleted file mode 100644 index adce0756ecf..00000000000 --- a/distribution/licenses/lucene-suggest-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51f9d52332f556976a5099817e35d37c69a24597 \ No newline at end of file diff --git a/distribution/licenses/lucene-suggest-6.0.0-snapshot-f0aa4fc.jar.sha1 
b/distribution/licenses/lucene-suggest-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..3d2cf156d40 --- /dev/null +++ b/distribution/licenses/lucene-suggest-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +89c46e9601cf8fb9acf77398838f8710c9e44053 \ No newline at end of file diff --git a/distribution/licenses/spatial4j-0.5.jar.sha1 b/distribution/licenses/spatial4j-0.5.jar.sha1 deleted file mode 100644 index 4bcf7a33b15..00000000000 --- a/distribution/licenses/spatial4j-0.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6e16edaf6b1ba76db7f08c2f3723fce3b358ecc3 \ No newline at end of file diff --git a/distribution/licenses/spatial4j-0.6.jar.sha1 b/distribution/licenses/spatial4j-0.6.jar.sha1 new file mode 100644 index 00000000000..740a25b1c90 --- /dev/null +++ b/distribution/licenses/spatial4j-0.6.jar.sha1 @@ -0,0 +1 @@ +21b15310bddcfd8c72611c180f20cf23279809a3 \ No newline at end of file diff --git a/distribution/rpm/src/main/packaging/init.d/elasticsearch b/distribution/rpm/src/main/packaging/init.d/elasticsearch index 12fed7dbc33..c68a5b65f3f 100644 --- a/distribution/rpm/src/main/packaging/init.d/elasticsearch +++ b/distribution/rpm/src/main/packaging/init.d/elasticsearch @@ -66,6 +66,7 @@ export ES_JAVA_OPTS export ES_GC_LOG_FILE export ES_STARTUP_SLEEP_TIME export JAVA_HOME +export ES_INCLUDE lockfile=/var/lock/subsys/$prog @@ -116,7 +117,7 @@ start() { cd $ES_HOME echo -n $"Starting $prog: " # if not running, start it up here, usually something like "daemon $exec" - daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Des.default.path.home=$ES_HOME -Des.default.path.logs=$LOG_DIR -Des.default.path.data=$DATA_DIR -Des.default.path.conf=$CONF_DIR + daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Ees.default.path.home=$ES_HOME -Ees.default.path.logs=$LOG_DIR -Ees.default.path.data=$DATA_DIR -Ees.default.path.conf=$CONF_DIR retval=$? 
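The RPM init script here, like the Debian one earlier, moves the default path settings from `-Des.*` JVM system properties to the new `-Ees.*` command-line arguments (the commons-cli to jopt-simple dependency swap above appears to be the CLI side of the same rework). A rough, hand-rolled illustration of what `-E` style arguments amount to; this is only a sketch of the convention, not Elasticsearch's actual jopt-simple based parser:

[source,java]
--------------------------------------------------
import java.util.HashMap;
import java.util.Map;

public class ESettingsSketch {
    // Collect every "-Ekey=value" argument into a settings map.
    static Map<String, String> parse(String[] args) {
        Map<String, String> settings = new HashMap<>();
        for (String arg : args) {
            if (arg.startsWith("-E")) {
                String kv = arg.substring(2);
                int eq = kv.indexOf('=');
                if (eq < 0) {
                    throw new IllegalArgumentException("setting [" + kv + "] must be key=value");
                }
                settings.put(kv.substring(0, eq), kv.substring(eq + 1));
            }
        }
        return settings;
    }

    public static void main(String[] args) {
        System.out.println(parse(new String[] {"-Ees.default.path.logs=/var/log/elasticsearch"}));
        // prints {es.default.path.logs=/var/log/elasticsearch}
    }
}
--------------------------------------------------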
echo [ $retval -eq 0 ] && touch $lockfile diff --git a/distribution/src/main/packaging/systemd/elasticsearch.service b/distribution/src/main/packaging/systemd/elasticsearch.service index 301586c1038..1aed30ac968 100644 --- a/distribution/src/main/packaging/systemd/elasticsearch.service +++ b/distribution/src/main/packaging/systemd/elasticsearch.service @@ -20,11 +20,10 @@ Group=elasticsearch ExecStartPre=/usr/share/elasticsearch/bin/elasticsearch-systemd-pre-exec ExecStart=/usr/share/elasticsearch/bin/elasticsearch \ - -Des.pidfile=${PID_DIR}/elasticsearch.pid \ - -Des.default.path.home=${ES_HOME} \ - -Des.default.path.logs=${LOG_DIR} \ - -Des.default.path.data=${DATA_DIR} \ - -Des.default.path.conf=${CONF_DIR} + -p ${PID_DIR}/elasticsearch.pid \ + -Ees.default.path.logs=${LOG_DIR} \ + -Ees.default.path.data=${DATA_DIR} \ + -Ees.default.path.conf=${CONF_DIR} StandardOutput=journal StandardError=inherit diff --git a/distribution/src/main/resources/bin/elasticsearch b/distribution/src/main/resources/bin/elasticsearch index b15105f1854..253ee1ee1f5 100755 --- a/distribution/src/main/resources/bin/elasticsearch +++ b/distribution/src/main/resources/bin/elasticsearch @@ -127,10 +127,10 @@ export HOSTNAME daemonized=`echo $* | egrep -- '(^-d |-d$| -d |--daemonize$|--daemonize )'` if [ -z "$daemonized" ] ; then exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" \ - org.elasticsearch.bootstrap.Elasticsearch start "$@" + org.elasticsearch.bootstrap.Elasticsearch "$@" else exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" \ - org.elasticsearch.bootstrap.Elasticsearch start "$@" <&- & + org.elasticsearch.bootstrap.Elasticsearch "$@" <&- & retval=$? pid=$! [ $retval -eq 0 ] || exit $retval diff --git a/distribution/src/main/resources/bin/elasticsearch.bat b/distribution/src/main/resources/bin/elasticsearch.bat index a0079fc967c..4da9a5d032a 100644 --- a/distribution/src/main/resources/bin/elasticsearch.bat +++ b/distribution/src/main/resources/bin/elasticsearch.bat @@ -43,6 +43,6 @@ IF ERRORLEVEL 1 ( EXIT /B %ERRORLEVEL% ) -"%JAVA_HOME%\bin\java" %JAVA_OPTS% %ES_JAVA_OPTS% %ES_PARAMS% !newparams! -cp "%ES_CLASSPATH%" "org.elasticsearch.bootstrap.Elasticsearch" start +"%JAVA_HOME%\bin\java" %JAVA_OPTS% %ES_JAVA_OPTS% %ES_PARAMS% -cp "%ES_CLASSPATH%" "org.elasticsearch.bootstrap.Elasticsearch" start !newparams! ENDLOCAL diff --git a/distribution/src/main/resources/bin/elasticsearch.in.bat b/distribution/src/main/resources/bin/elasticsearch.in.bat index 7138cf5f5ca..537df9d4f9f 100644 --- a/distribution/src/main/resources/bin/elasticsearch.in.bat +++ b/distribution/src/main/resources/bin/elasticsearch.in.bat @@ -85,6 +85,10 @@ REM JAVA_OPTS=%JAVA_OPTS% -XX:HeapDumpPath=$ES_HOME/logs/heapdump.hprof REM Disables explicit GC set JAVA_OPTS=%JAVA_OPTS% -XX:+DisableExplicitGC +REM Enable pre-touching of memory pages used by the JVM during hotspot +REM initialization +set JAVA_OPTS=%JAVA_OPTS% -XX:+AlwaysPreTouch + REM Ensure UTF-8 encoding by default (e.g. 
filenames) set JAVA_OPTS=%JAVA_OPTS% -Dfile.encoding=UTF-8 @@ -100,4 +104,4 @@ ECHO additional elements via the plugin mechanism, or if code must really be 1>&2 ECHO added to the main classpath, add jars to lib\, unsupported 1>&2 EXIT /B 1 ) -set ES_PARAMS=-Delasticsearch -Des-foreground=yes -Des.path.home="%ES_HOME%" +set ES_PARAMS=-Delasticsearch -Des.path.home="%ES_HOME%" diff --git a/distribution/src/main/resources/bin/elasticsearch.in.sh b/distribution/src/main/resources/bin/elasticsearch.in.sh index f859a06ffab..69d2fc94112 100644 --- a/distribution/src/main/resources/bin/elasticsearch.in.sh +++ b/distribution/src/main/resources/bin/elasticsearch.in.sh @@ -81,6 +81,10 @@ JAVA_OPTS="$JAVA_OPTS -XX:+HeapDumpOnOutOfMemoryError" # Disables explicit GC JAVA_OPTS="$JAVA_OPTS -XX:+DisableExplicitGC" +# Enable pre-touching of memory pages used by the JVM during hotspot +# initialization +JAVA_OPTS="$JAVA_OPTS -XX:+AlwaysPreTouch" + # Ensure UTF-8 encoding by default (e.g. filenames) JAVA_OPTS="$JAVA_OPTS -Dfile.encoding=UTF-8" diff --git a/distribution/src/main/resources/bin/service.bat b/distribution/src/main/resources/bin/service.bat index 22242e36ff9..2786c87a634 100644 --- a/distribution/src/main/resources/bin/service.bat +++ b/distribution/src/main/resources/bin/service.bat @@ -152,7 +152,7 @@ if "%DATA_DIR%" == "" set DATA_DIR=%ES_HOME%\data if "%CONF_DIR%" == "" set CONF_DIR=%ES_HOME%\config -set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.default.path.home="%ES_HOME%";-Des.default.path.logs="%LOG_DIR%";-Des.default.path.data="%DATA_DIR%";-Des.default.path.conf="%CONF_DIR%" +set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.default.path.logs="%LOG_DIR%";-Des.default.path.data="%DATA_DIR%";-Des.default.path.conf="%CONF_DIR%" set JVM_OPTS=%JAVA_OPTS: =;% diff --git a/distribution/src/main/resources/config/logging.yml b/distribution/src/main/resources/config/logging.yml index 939aa1eed0e..187e79cffa0 100644 --- a/distribution/src/main/resources/config/logging.yml +++ b/distribution/src/main/resources/config/logging.yml @@ -1,4 +1,4 @@ -# you can override this using by setting a system property, for example -Des.logger.level=DEBUG +# you can override this by setting a system property, for example -Ees.logger.level=DEBUG es.logger.level: INFO rootLogger: ${es.logger.level}, console, file logger: diff --git a/docs/java-api/client.asciidoc b/docs/java-api/client.asciidoc index 43cf3c148a6..75a58f3c6b5 100644 --- a/docs/java-api/client.asciidoc +++ b/docs/java-api/client.asciidoc @@ -12,12 +12,16 @@ Obtaining an elasticsearch `Client` is simple. The most common way to get a client is by creating a <> that connects to a cluster. -*Important:* -______________________________________________________________________________________________________________________________________________________________ -Please note that you are encouraged to use the same version on client -and cluster sides. You may hit some incompatibility issues when mixing -major versions. -______________________________________________________________________________________________________________________________________________________________ +[IMPORTANT] +============================== + +The client must have the same major version (e.g. `2.x`, or `5.x`) as the +nodes in the cluster. Clients may connect to clusters which have a different +minor version (e.g. `2.3.x`) but it is possible that new functionality may not +be supported. Ideally, the client should have the same version as the +cluster. + +============================== [[transport-client]] @@ -53,11 +57,23 @@ Client client = TransportClient.builder().settings(settings).build(); //Add transport addresses and do something with the client... -------------------------------------------------- -The client allows sniffing the rest of the cluster, which adds data nodes -into its list of machines to use. In this case, note that the IP addresses -used will be the ones that the other nodes were started with (the -"publish" address). In order to enable it, set the -`client.transport.sniff` to `true`: +The Transport client comes with a cluster sniffing feature which +allows it to dynamically add new hosts and remove old ones. +When sniffing is enabled, the transport client will connect to the nodes in its +internal node list, which is built via calls to `addTransportAddress`. +After this, the client will call the internal cluster state API on those nodes +to discover available data nodes. The internal node list of the client will +be replaced with those data nodes only. This list is refreshed every five seconds by default. +Note that the IP addresses the sniffer connects to are the ones declared as the 'publish' +address in those nodes' elasticsearch config. + +Keep in mind that this list might not include the original node it connected to +if that node is not a data node. If, for instance, you initially connect to a +master node, after sniffing no further requests will go to that master node, +but rather to any data nodes instead. The reason the transport client excludes non-data +nodes is to avoid sending search traffic to master-only nodes. + +In order to enable sniffing, set `client.transport.sniff` to `true`: [source,java] -------------------------------------------------- diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc index 012633f1e4b..fbd3fe33165 100644 --- a/docs/java-api/index.asciidoc +++ b/docs/java-api/index.asciidoc @@ -142,8 +142,6 @@ include::search.asciidoc[] include::aggs.asciidoc[] -include::percolate.asciidoc[] - include::query-dsl.asciidoc[] include::indexed-scripts.asciidoc[] diff --git a/docs/java-api/query-dsl/geo-shape-query.asciidoc b/docs/java-api/query-dsl/geo-shape-query.asciidoc index c753cd72c1a..e08410acbdb 100644 --- a/docs/java-api/query-dsl/geo-shape-query.asciidoc +++ b/docs/java-api/query-dsl/geo-shape-query.asciidoc @@ -10,9 +10,9 @@ to your classpath in order to use this type: [source,xml] ----------------------------------------------- - com.spatial4j + org.locationtech.spatial4j spatial4j - 0.4.1 <1> + 0.6 <1> @@ -27,7 +27,7 @@ to your classpath in order to use this type: ----------------------------------------------- -<1> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22com.spatial4j%22%20AND%20a%3A%22spatial4j%22[Maven Central] +<1> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.locationtech.spatial4j%22%20AND%20a%3A%22spatial4j%22[Maven Central] <2> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22com.vividsolutions%22%20AND%20a%3A%22jts%22[Maven Central] [source,java] diff --git a/docs/java-api/percolate.asciidoc b/docs/java-api/query-dsl/percolator-query.asciidoc similarity index 63% rename from docs/java-api/percolate.asciidoc rename to docs/java-api/query-dsl/percolator-query.asciidoc index a08e09b2afe..04f3ba9cb8d 100644 --- a/docs/java-api/percolate.asciidoc +++ b/docs/java-api/query-dsl/percolator-query.asciidoc @@ -1,12
+1,8 @@ -[[percolate]] -== Percolate API +[[java-query-percolator-query]] +==== Percolator query -The percolator allows one to register queries against an index, and then -send `percolate` requests which include a doc, getting back the -queries that match on that doc out of the set of registered queries. - -Read the main {ref}/search-percolate.html[percolate] -documentation before reading this guide. +See: + * {ref}/query-dsl-percolator-query.html[Percolator Query] [source,java] -------------------------------------------------- @@ -37,14 +33,12 @@ docBuilder.field("doc").startObject(); //This is needed to designate the document docBuilder.field("content", "This is amazing!"); docBuilder.endObject(); //End of the doc field docBuilder.endObject(); //End of the JSON root object -//Percolate -PercolateResponse response = client.preparePercolate() - .setIndices("myIndexName") - .setDocumentType("myDocumentType") - .setSource(docBuilder).execute().actionGet(); +// Percolate, by executing the percolator query in the query dsl: +SearchResponse response = client.prepareSearch("myIndexName") + .setQuery(QueryBuilders.percolatorQuery("myDocumentType", docBuilder.bytes())) + .get(); //Iterate over the results -for(PercolateResponse.Match match : response) { - //Handle the result which is the name of - //the query in the percolator +for(SearchHit hit : response.getHits()) { + // each hit is a percolator query that matched the document } -------------------------------------------------- diff --git a/docs/java-api/query-dsl/special-queries.asciidoc b/docs/java-api/query-dsl/special-queries.asciidoc index 0df5af890ec..d5c9db07391 100644 --- a/docs/java-api/query-dsl/special-queries.asciidoc +++ b/docs/java-api/query-dsl/special-queries.asciidoc @@ -27,3 +27,5 @@ include::template-query.asciidoc[] include::script-query.asciidoc[] +include::percolator-query.asciidoc[] + diff --git a/docs/plugins/authors.asciidoc b/docs/plugins/authors.asciidoc index d0a606987ae..af3710f7e2d 100644 --- a/docs/plugins/authors.asciidoc +++ b/docs/plugins/authors.asciidoc @@ -26,9 +26,7 @@ https://github.com/elastic/elasticsearch/blob/master/buildSrc/src/main/resources Either fill in this template yourself (see https://github.com/lmenezes/elasticsearch-kopf/blob/master/plugin-descriptor.properties[elasticsearch-kopf] as an example) or, if you are using Elasticsearch's Gradle build system, you -can fill in the necessary values in the `build.gradle` file for your plugin. For -instance, see -https://github.com/elastic/elasticsearch/blob/master/plugins/site-example/build.gradle[`/plugins/site-example/build.gradle`]. +can fill in the necessary values in the `build.gradle` file for your plugin. [float] ==== Mandatory elements for plugins diff --git a/docs/plugins/index.asciidoc b/docs/plugins/index.asciidoc index 598e7872dcd..fa05668c131 100644 --- a/docs/plugins/index.asciidoc +++ b/docs/plugins/index.asciidoc @@ -67,3 +67,6 @@ include::integrations.asciidoc[] include::authors.asciidoc[] +include::redirects.asciidoc[] + + diff --git a/docs/plugins/mapper-attachments.asciidoc b/docs/plugins/mapper-attachments.asciidoc index ed992623a50..e65ab9ff64a 100644 --- a/docs/plugins/mapper-attachments.asciidoc +++ b/docs/plugins/mapper-attachments.asciidoc @@ -176,7 +176,7 @@ need to specify the `type` (like `string` or `date`) since it is already known.
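Stepping back to the percolator rewrite above: the new docs example runs percolation as an ordinary search whose query is a percolator query. Assembled here into one compilable method with the imports the snippet needs; `client` is assumed to be a connected `Client`, and percolator queries are assumed to be registered already under `myDocumentType` (registration is outside the scope of this sketch):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;

public class PercolateViaSearch {
    static void percolate(Client client) throws Exception {
        XContentBuilder docBuilder = jsonBuilder().startObject();
        docBuilder.field("doc").startObject();            // wrap the document to percolate
        docBuilder.field("content", "This is amazing!");
        docBuilder.endObject();                           // end of the doc field
        docBuilder.endObject();                           // end of the JSON root object

        SearchResponse response = client.prepareSearch("myIndexName")
            .setQuery(QueryBuilders.percolatorQuery("myDocumentType", docBuilder.bytes()))
            .get();

        for (SearchHit hit : response.getHits()) {
            // each hit is a registered percolator query that matched the document
            System.out.println("matching query: " + hit.getId());
        }
    }
}
--------------------------------------------------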
[[mapper-attachments-copy-to]] ==== Copy To feature -If you want to use http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-core-types.html#copy-to[copy_to] +If you want to use https://www.elastic.co/guide/en/elasticsearch/reference/current/copy-to.html[copy_to] feature, you need to define it on each sub-field you want to copy to another field: [source,js] @@ -405,41 +405,3 @@ It gives back: } } -------------------------- - -[[mapper-attachments-standalone]] -==== Stand alone runner - -If you want to run some tests within your IDE, you can use `StandaloneRunner` class. -It accepts arguments: - -* `-u file://URL/TO/YOUR/DOC` -* `--size` set extracted size (default to mapper attachment size) -* `BASE64` encoded binary - -Example: - -[source,sh] --------------------------- -StandaloneRunner BASE64Text -StandaloneRunner -u /tmp/mydoc.pdf -StandaloneRunner -u /tmp/mydoc.pdf --size 1000000 --------------------------- - -It produces something like: - -[source,text] --------------------------- -## Extracted text ---------------------- BEGIN ----------------------- -This is the extracted text ----------------------- END ------------------------ -## Metadata -- author: null -- content_length: null -- content_type: application/pdf -- date: null -- keywords: null -- language: null -- name: null -- title: null --------------------------- diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc index d2e57b2efc8..fba4704ab97 100644 --- a/docs/plugins/plugin-script.asciidoc +++ b/docs/plugins/plugin-script.asciidoc @@ -167,7 +167,7 @@ can do this as follows: [source,sh] --------------------- -sudo bin/elasticsearch-plugin -Des.path.conf=/path/to/custom/config/dir install +sudo bin/elasticsearch-plugin -Ees.path.conf=/path/to/custom/config/dir install --------------------- You can also set the `CONF_DIR` environment variable to the custom config diff --git a/docs/plugins/redirects.asciidoc b/docs/plugins/redirects.asciidoc new file mode 100644 index 00000000000..caf2008e521 --- /dev/null +++ b/docs/plugins/redirects.asciidoc @@ -0,0 +1,40 @@ +["appendix",role="exclude",id="redirects"] += Deleted pages + +The following pages have moved or been deleted. + +[role="exclude",id="discovery-multicast"] +=== Multicast Discovery Plugin + +The `multicast-discovery` plugin has been removed. Instead, configure networking +using unicast (see {ref}/modules-network.html[Network settings]) or using +one of the <>. + +[role="exclude",id="cloud-aws"] +=== AWS Cloud Plugin + +The `cloud-aws` plugin has been split into two separate plugins: + +* <> (`discovery-ec2`) +* <> (`repository-s3`) + + +[role="exclude",id="cloud-azure"] +=== Azure Cloud Plugin + +The `cloud-azure` plugin has been split into two separate plugins: + +* <> (`discovery-azure`) +* <> (`repository-azure`) + + +[role="exclude",id="cloud-gce"] +=== GCE Cloud Plugin + +The `cloud-gce` plugin has been renamed to <> (`discovery-gce`). + + + + + + diff --git a/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc b/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc index 8b1f58f7ff0..a69b2b3cb11 100644 --- a/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc @@ -111,3 +111,35 @@ Zone:: 'Z' outputs offset without a colon, 'ZZ' outputs the offset with a colon, Zone names:: Time zone names ('z') cannot be parsed. 
Any characters in the pattern that are not in the ranges of ['a'..'z'] and ['A'..'Z'] will be treated as quoted text. For instance, characters like ':', '.', ' ', '#' and '?' will appear in the resulting time text even they are not embraced within single quotes. + +[[time-zones]] +==== Time zone in date range aggregations + +Dates can be converted from another time zone to UTC by specifying the `time_zone` parameter. + +Time zones may either be specified as an ISO 8601 UTC offset (e.g. +01:00 or -08:00) or as one of +the http://www.joda.org/joda-time/timezones.html[time zone ids] from the TZ database. + +The `time_zone` parameter is also applied to rounding in date math expressions. As an example, +to round to the beginning of the day in the CET time zone, you can do the following: + +[source,js] +-------------------------------------------------- +{ + "aggs": { + "range": { + "date_range": { + "field": "date", + "time_zone": "CET", + "ranges": [ + { "to": "2016-02-15/d" }, <1> + { "from": "2016-02-15/d", "to" : "now/d" }, <2> + { "from": "now/d" } + ] + } + } + } + } +-------------------------------------------------- +<1> This date will be converted to `2016-02-15T00:00:00.000+01:00`. +<2> `now/d` will be rounded to the beginning of the day in the CET time zone. diff --git a/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc index f77c47d156e..1268727b2ef 100644 --- a/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc @@ -27,7 +27,8 @@ use. For languages like German they are quite good. XML based hyphenation grammar files can be found in the http://offo.sourceforge.net/hyphenation/#FOP+XML+Hyphenation+Patterns[Objects For Formatting Objects] -(OFFO) Sourceforge project. You can download http://downloads.sourceforge.net/offo/offo-hyphenation.zip[offo-hyphenation.zip] +(OFFO) Sourceforge project. Currently only FOP v1.2 compatible hyphenation files +are supported. You can download https://sourceforge.net/projects/offo/files/offo-hyphenation/1.2/offo-hyphenation_v1.2.zip/download[offo-hyphenation_v1.2.zip] directly and look in the `offo-hyphenation/hyph/` directory. Credits for the hyphenation code go to the Apache FOP project . diff --git a/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc index c8b405bf820..42dbe5a864a 100644 --- a/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc @@ -13,6 +13,6 @@ type: |======================================================================= |Setting |Description |`max_token_length` |The maximum token length. If a token is seen that -exceeds this length then it is discarded. Defaults to `255`. +exceeds this length then it is split at `max_token_length` intervals. Defaults to `255`. |======================================================================= diff --git a/docs/reference/cluster/nodes-info.asciidoc b/docs/reference/cluster/nodes-info.asciidoc index a3072768ca6..5ed979abd0d 100644 --- a/docs/reference/cluster/nodes-info.asciidoc +++ b/docs/reference/cluster/nodes-info.asciidoc @@ -17,7 +17,7 @@ The second command selectively retrieves nodes information of only By default, it just returns all attributes and core settings for a node.
It also allows to get only information on `settings`, `os`, `process`, `jvm`, -`thread_pool`, `transport`, `http` and `plugins`: +`thread_pool`, `transport`, `http`, `plugins` and `ingest`: [source,js] -------------------------------------------------- @@ -122,3 +122,71 @@ The result will look similar to: } } -------------------------------------------------- + +[float] +[[ingest-info]] +==== Ingest information + +`ingest` - if set, the result will contain details about the available +processors per node: + +* `type`: the processor type + +The result will look similar to: + +[source,js] +-------------------------------------------------- +{ + "cluster_name": "elasticsearch", + "nodes": { + "O70_wBv6S9aPPcAKdSUBtw": { + "ingest": { + "processors": [ + { + "type": "date" + }, + { + "type": "uppercase" + }, + { + "type": "set" + }, + { + "type": "lowercase" + }, + { + "type": "gsub" + }, + { + "type": "convert" + }, + { + "type": "remove" + }, + { + "type": "fail" + }, + { + "type": "foreach" + }, + { + "type": "split" + }, + { + "type": "trim" + }, + { + "type": "rename" + }, + { + "type": "join" + }, + { + "type": "append" + } + ] + } + } + } +} +-------------------------------------------------- \ No newline at end of file diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index bcef61d4ef7..8a2f27112b5 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -60,6 +60,9 @@ of `indices`, `os`, `process`, `jvm`, `transport`, `http`, `discovery`:: Statistics about the discovery +`ingest`:: + Statistics about ingest preprocessing + [source,js] -------------------------------------------------- # return indices and os @@ -227,3 +230,23 @@ curl -XGET 'http://localhost:9200/_nodes/stats?pretty&groups=_all' # Some groups from just the indices stats curl -XGET 'http://localhost:9200/_nodes/stats/indices?pretty&groups=foo,bar' -------------------------------------------------- + +[float] +[[ingest-stats]] +=== Ingest statistics + +The `ingest` flag can be set to retrieve statistics that concern ingest: + +`ingest.total.count`:: + The total number of documents ingested during the lifetime of this node + +`ingest.total.time_in_millis`:: + The total time spent on ingest preprocessing documents during the lifetime of this node + +`ingest.total.current`:: + The total number of documents currently being ingested + +`ingest.total.failed`:: + The total number of ingest preprocessing operations that failed during the lifetime of this node + +On top of these overall ingest statistics, these statistics are also provided on a per-pipeline basis. \ No newline at end of file diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index 8ec58424730..d0b127cd352 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ b/docs/reference/cluster/update-settings.asciidoc @@ -83,6 +83,16 @@ Cluster wide settings can be returned using: curl -XGET localhost:9200/_cluster/settings -------------------------------------------------- +[float] +=== Precedence of settings + +Transient cluster settings take precedence over persistent cluster settings, +which take precedence over settings configured in the `elasticsearch.yml` +config file. + +For this reason it is preferable to use the `elasticsearch.yml` file only +for local configurations, and set all cluster-wide settings with the +`settings` API.
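+ +For example, to change shard allocation across the cluster without persisting the change, the setting can be applied transiently. This is an illustrative request only; `cluster.routing.allocation.enable` stands in for any dynamic cluster setting: + +[source,js] +-------------------------------------------------- +curl -XPUT localhost:9200/_cluster/settings -d '{ + "transient" : { + "cluster.routing.allocation.enable" : "none" + } +}' +--------------------------------------------------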
A list of dynamically updatable settings can be found in the <> documentation. diff --git a/docs/reference/docs.asciidoc b/docs/reference/docs.asciidoc index a35fa4c4a89..465d2e60c77 100644 --- a/docs/reference/docs.asciidoc +++ b/docs/reference/docs.asciidoc @@ -29,10 +29,14 @@ include::docs/delete.asciidoc[] include::docs/update.asciidoc[] +include::docs/update-by-query.asciidoc[] + include::docs/multi-get.asciidoc[] include::docs/bulk.asciidoc[] +include::docs/reindex.asciidoc[] + include::docs/termvectors.asciidoc[] include::docs/multi-termvectors.asciidoc[] diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index c8d59cb2477..8173503054f 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -1,5 +1,5 @@ [[docs-reindex]] -==== Reindex API +== Reindex API `_reindex`'s most basic form just copies documents from one index to another. This will copy documents from `twitter` into `new_twitter`: @@ -277,7 +277,7 @@ POST /_reindex -------------------------------------------------- // AUTOSENSE -Reindex can also use the link:ingest.html[Ingest] feature by specifying a +Reindex can also use the <> feature by specifying a `pipeline` like this: [source,js] @@ -420,9 +420,7 @@ will finish when their sum is equal to the `total` field. [float] -=== Examples - -==== Change the name of a field +=== Reindex to change the name of a field `_reindex` can be used to build a copy of an index with renamed fields. Say you create an index containing documents that look like this: diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index 9aac11b2c86..13b5f6fc0eb 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -1,5 +1,5 @@ [[docs-update-by-query]] -==== Update By Query API +== Update By Query API The simplest usage of `_update_by_query` just performs an update on every document in the index without changing the source. This is useful to @@ -101,8 +101,8 @@ Just as in {ref}/docs-update.html[Update API] you can set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. That will cause `_update_by_query` to omit that document from its updates. Setting `ctx.op` to anything else is an error. If you want to delete by a query you can use the -<> instead. Setting any other -field in `ctx` is an error. +{plugins}/plugins-delete-by-query.html[Delete by Query plugin] instead. Setting any +other field in `ctx` is an error. Note that we stopped specifying `conflicts=proceed`. In this case we want a version conflict to abort the process so we can handle the failure. @@ -138,7 +138,7 @@ POST /twitter/_update_by_query?scroll_size=1000 -------------------------------------------------- // AUTOSENSE -`_update_by_query` can also use the link:ingest.html[Ingest] feature by +`_update_by_query` can also use the <> feature by specifying a `pipeline` like this: [source,js] @@ -267,11 +267,8 @@ progress by adding the `updated`, `created`, and `deleted` fields. The request will finish when their sum is equal to the `total` field. 
-[float] -=== Examples - [[picking-up-a-new-property]] -==== Pick up a new property +=== Pick up a new property Say you created an index without dynamic mapping, filled it with data, and then added a mapping value to pick up more fields from the data: diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 634bc23d6ac..316714259e0 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -251,5 +251,15 @@ sure the document doesn't change during the update. You can use the `version` parameter to specify that the document should only be updated if its version matches the one specified. By setting version type to `force` you can force the new version of the document after update (use with care! with `force` -there is no guarantee the document didn't change).Version types `external` & -`external_gte` are not supported. +there is no guarantee the document didn't change). + +[NOTE] +.The update API does not support external versioning +===================================================== + +External versioning (version types `external` & `external_gte`) is not +supported by the update API as it would result in Elasticsearch version +numbers being out of sync with the external system. Use the +<> instead. + +===================================================== diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 8ff832c673f..47bcb3031ff 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -163,7 +163,7 @@ As mentioned previously, we can override either the cluster or node name. This c [source,sh] -------------------------------------------------- -./elasticsearch --cluster.name my_cluster_name --node.name my_node_name +./elasticsearch -Ees.cluster.name=my_cluster_name -Ees.node.name=my_node_name -------------------------------------------------- Also note the line marked http with information about the HTTP address (`192.168.8.112`) and port (`9200`) that our node is reachable from. By default, Elasticsearch uses port `9200` to provide access to its REST API. This port is configurable if necessary. diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 7d6614342b5..f7e1f68dec5 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -148,6 +148,10 @@ Other index settings are available in index modules: Enable or disable dynamic mapping for an index. +<>:: + + Control over how shards are merged by the background merge process. + <>:: Configure custom similarity settings to customize how search results are @@ -173,6 +177,8 @@ include::index-modules/allocation.asciidoc[] include::index-modules/mapper.asciidoc[] +include::index-modules/merge.asciidoc[] + include::index-modules/similarity.asciidoc[] include::index-modules/slowlog.asciidoc[] diff --git a/docs/reference/index-modules/allocation/filtering.asciidoc b/docs/reference/index-modules/allocation/filtering.asciidoc index 784fa1af24c..44c9b1a712c 100644 --- a/docs/reference/index-modules/allocation/filtering.asciidoc +++ b/docs/reference/index-modules/allocation/filtering.asciidoc @@ -14,7 +14,7 @@ attribute as follows: [source,sh] ------------------------ -bin/elasticsearch --node.rack rack1 --node.size big <1> +bin/elasticsearch -Ees.node.rack=rack1 -Ees.node.size=big <1> ------------------------ <1> These attribute settings can also be specified in the `elasticsearch.yml` config file. 
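+ +These custom attributes can then be referenced from the index-level allocation filter settings. A minimal sketch, assuming an index named `test` and the `size` attribute defined above: + +[source,json] +------------------------ +PUT test/_settings +{ + "index.routing.allocation.include.size": "big,medium" +} +------------------------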
diff --git a/docs/reference/index-modules/merge.asciidoc b/docs/reference/index-modules/merge.asciidoc new file mode 100644 index 00000000000..7e5260f95d4 --- /dev/null +++ b/docs/reference/index-modules/merge.asciidoc @@ -0,0 +1,30 @@ +[[index-modules-merge]] +== Merge + +A shard in Elasticsearch is a Lucene index, and a Lucene index is broken down +into segments. Segments are internal storage elements in the index where the +index data is stored, and are immutable. Smaller segments are periodically +merged into larger segments to keep the number of segments under control and to expunge +deletes. + +The merge process uses auto-throttling to balance the use of hardware +resources between merging and other activities like search. + +[float] +[[merge-scheduling]] +=== Merge scheduling + +The merge scheduler (ConcurrentMergeScheduler) controls the execution of merge +operations when they are needed. Merges run in separate threads, and when the +maximum number of threads is reached, further merges will wait until a merge +thread becomes available. + +The merge scheduler supports the following _dynamic_ setting: + +`index.merge.scheduler.max_thread_count`:: + + The maximum number of threads that may be merging at once. Defaults to + `Math.max(1, Math.min(4, Runtime.getRuntime().availableProcessors() / 2))`, + which works well for a good solid-state disk (SSD). If your index is on + spinning platter drives instead, decrease this to 1. + diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 87c400e018c..abfcb18de2e 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -47,9 +47,7 @@ include::testing.asciidoc[] include::glossary.asciidoc[] -////////////////////////////////////////// - include::release-notes.asciidoc[] -////////////////////////////////////////// +include::release-notes.asciidoc[] include::redirects.asciidoc[] diff --git a/docs/reference/indices/clearcache.asciidoc b/docs/reference/indices/clearcache.asciidoc index 21008e5b46b..8ebb9e3488a 100644 --- a/docs/reference/indices/clearcache.asciidoc +++ b/docs/reference/indices/clearcache.asciidoc @@ -2,7 +2,7 @@ == Clear Cache The clear cache API allows to clear either all caches or specific cached -associated with one ore more indices. +associated with one or more indices. [source,js] -------------------------------------------------- diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index b2486c7f12b..c565f3b2047 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -15,10 +15,10 @@ on all nodes. To disable ingest on a node, configure the following setting in th node.ingest: false -------------------------------------------------- -To pre-process documents before indexing, you <> that specifies +To pre-process documents before indexing, you <> that specifies a series of <>. Each processor transforms the document in some way. For example, you may have a pipeline that consists of one processor that removes a field from -the document followed by another processor that renames a field. +the document followed by another processor that renames a field. To use a pipeline, you simply specify the `pipeline` parameter on an index or bulk request to tell the ingest node which pipeline to use. For example: @@ -32,7 +32,7 @@ PUT /my-index/my-type/my-id?pipeline=my_pipeline_id -------------------------------------------------- // AUTOSENSE -See <> for more information about creating, adding, and deleting pipelines.
+See <> for more information about creating, adding, and deleting pipelines. -- diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 95d7005ee34..10b640dbaf1 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -1,4 +1,4 @@ -[[pipe-line]] +[[pipeline]] == Pipeline Definition A pipeline is a definition of a series of <> that are to be executed @@ -620,6 +620,23 @@ but is very useful for bookkeeping and tracing errors to specific processors. See <> to learn more about the `on_failure` field and error handling in pipelines. +The <> can be used to figure out what processors are available in a cluster. +The <> will provide a per-node list of what processors are available. + +Custom processors must be installed on all nodes. The put pipeline API will fail if a processor specified in a pipeline +doesn't exist on all nodes. If you rely on custom processor plugins, make sure to mark these plugins as mandatory by adding +the `plugin.mandatory` setting to the `config/elasticsearch.yml` file, for example: + +[source,yaml] +-------------------------------------------------- +plugin.mandatory: ingest-attachment,ingest-geoip +-------------------------------------------------- + +A node will not start if either of these plugins is not available. + +The <> can be used to fetch ingest usage statistics, globally and on a per-pipeline +basis. This is useful for finding out which pipelines are used the most or spend the most time on preprocessing. + [[append-procesesor]] === Append Processor Appends one or more values to an existing array if the field already exists and it is an array. diff --git a/docs/reference/mapping/params/coerce.asciidoc b/docs/reference/mapping/params/coerce.asciidoc index c9491607a6b..0121c307230 100644 --- a/docs/reference/mapping/params/coerce.asciidoc +++ b/docs/reference/mapping/params/coerce.asciidoc @@ -12,7 +12,6 @@ For instance: * Strings will be coerced to numbers. * Floating points will be truncated for integer values. -* Lon/lat geo-points will be normalized to a standard -180:180 / -90:90 coordinate system. For instance: diff --git a/docs/reference/mapping/params/store.asciidoc b/docs/reference/mapping/params/store.asciidoc index b81208aed77..46d57e9d8b5 100644 --- a/docs/reference/mapping/params/store.asciidoc +++ b/docs/reference/mapping/params/store.asciidoc @@ -1,7 +1,7 @@ [[mapping-store]] === `store` -By default, field values <> to make them searchable, +By default, field values are <> to make them searchable, but they are not _stored_. This means that the field can be queried, but the original field value cannot be retrieved. diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index e724a5428b2..45c5e65addb 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -101,17 +101,6 @@ The following parameters are accepted by `geo_point` fields: [horizontal] -<>:: - - Normalize longitude and latitude values to a standard -180:180 / -90:90 - coordinate system. Accepts `true` and `false` (default). - -<>:: - - Should the field be stored on disk in a column-stride fashion, so that it - can later be used for sorting, aggregations, or scripting? Accepts `true` - (default) or `false`.
- <>:: Should the geo-point also be indexed as a geohash in the `.geohash` diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc index 57401cb01d7..0dda43da713 100644 --- a/docs/reference/migration/index.asciidoc +++ b/docs/reference/migration/index.asciidoc @@ -8,26 +8,15 @@ your application from one version of Elasticsearch to another. As a general rule: -* Migration between major versions -- e.g. `1.x` to `2.x` -- +* Migration between minor versions -- e.g. `5.x` to `5.y` -- can be + performed by <>. + +* Migration between consecutive major versions -- e.g. `2.x` to `5.x` -- requires a <>. -* Migration between minor versions -- e.g. `1.x` to `1.y` -- can be - performed by <>. +* Migration between non-consecutive major versions -- e.g. `1.x` to `5.x` -- + is not supported. See <> for more info. -- include::migrate_5_0.asciidoc[] - -include::migrate_2_3.asciidoc[] - -include::migrate_2_2.asciidoc[] - -include::migrate_2_1.asciidoc[] - -include::migrate_2_0.asciidoc[] - -include::migrate_1_6.asciidoc[] - -include::migrate_1_4.asciidoc[] - -include::migrate_1_0.asciidoc[] diff --git a/docs/reference/migration/migrate_1_0.asciidoc b/docs/reference/migration/migrate_1_0.asciidoc deleted file mode 100644 index 1e917c4a0d9..00000000000 --- a/docs/reference/migration/migrate_1_0.asciidoc +++ /dev/null @@ -1,372 +0,0 @@ -[[breaking-changes-1.0]] -== Breaking changes in 1.0 - -This section discusses the changes that you need to be aware of when migrating -your application to Elasticsearch 1.0. - -=== System and settings - -* Elasticsearch now runs in the foreground by default. There is no more `-f` - flag on the command line. Instead, to run elasticsearch as a daemon, use - the `-d` flag: - -[source,sh] ---------------- -./bin/elasticsearch -d ---------------- - -* Command line settings can now be passed without the `-Des.` prefix, for - instance: - -[source,sh] ---------------- -./bin/elasticsearch --node.name=search_1 --cluster.name=production ---------------- - -* Elasticsearch on 64 bit Linux now uses <> by default. Make - sure that you set <> to a sufficiently high - number. The RPM and Debian packages default this value to `262144`. - -* The RPM and Debian packages no longer start Elasticsearch by default. - -* The `cluster.routing.allocation` settings (`disable_allocation`, - `disable_new_allocation` and `disable_replica_location`) have been - <>: -+ -[source,yaml] ---------------- -cluster.routing.allocation.enable: all|primaries|new_primaries|none ---------------- - -=== Stats and Info APIs - -The <>, <>, -<> and <> -APIs have all been changed to make their format more RESTful and less clumsy. - -For instance, if you just want the `nodes` section of the `cluster_state`, -instead of: - -[source,sh] ---------------- -GET /_cluster/state?filter_metadata&filter_routing_table&filter_blocks ---------------- - -you now use: - -[source,sh] ---------------- -GET /_cluster/state/nodes ---------------- - -Similarly for the `nodes_stats` API, if you want the `transport` and `http` -metrics only, instead of: - -[source,sh] ---------------- -GET /_nodes/stats?clear&transport&http ---------------- - -you now use: - -[source,sh] ---------------- -GET /_nodes/stats/transport,http ---------------- - -See the links above for full details. - - -=== Indices APIs - -The `mapping`, `alias`, `settings`, and `warmer` index APIs are all similar -but there are subtle differences in the order of the URL and the response -body. 
For instance, adding a mapping and a warmer look slightly different: - -[source,sh] ---------------- -PUT /{index}/{type}/_mapping -PUT /{index}/_warmer/{name} ---------------- - -These URLs have been unified as: - -[source,sh] ---------------- -PUT /{indices}/_mapping/{type} -PUT /{indices}/_alias/{name} -PUT /{indices}/_warmer/{name} - -GET /{indices}/_mapping/{types} -GET /{indices}/_alias/{names} -GET /{indices}/_settings/{names} -GET /{indices}/_warmer/{names} - -DELETE /{indices}/_mapping/{types} -DELETE /{indices}/_alias/{names} -DELETE /{indices}/_warmer/{names} ---------------- - -All of the `{indices}`, `{types}` and `{names}` parameters can be replaced by: - - * `_all`, `*` or blank (ie left out altogether), all of which mean ``all'' - * wildcards like `test*` - * comma-separated lists: `index_1,test_*` - -The only exception is `DELETE` which doesn't accept blank (missing) -parameters. If you want to delete something, you should be specific. - -Similarly, the return values for `GET` have been unified with the following -rules: - -* Only return values that exist. If you try to `GET` a mapping which doesn't - exist, then the result will be an empty object: `{}`. We no longer throw a - `404` if the requested mapping/warmer/alias/setting doesn't exist. - -* The response format always has the index name, then the section, then the - element name, for instance: -+ -[source,js] ---------------- -{ - "my_index": { - "mappings": { - "my_type": {...} - } - } -} ---------------- -+ -This is a breaking change for the `get_mapping` API. - -In the future we will also provide plural versions to allow putting multiple mappings etc in a single request. - -See <>, <>, <>, -<>, <>, -`warmers`, and <> for more details. - -=== Index request - -Previously a document could be indexed as itself, or wrapped in an outer -object which specified the `type` name: - -[source,js] ---------------- -PUT /my_index/my_type/1 -{ - "my_type": { - ... doc fields ... - } -} ---------------- - -This led to some ambiguity when a document also included a field with the same -name as the `type`. We no longer accept the outer `type` wrapper, but this -behaviour can be reenabled on an index-by-index basis with the setting: -`index.mapping.allow_type_wrapper`. - -=== Search requests - -While the `search` API takes a top-level `query` parameter, the -<>, `delete-by-query` and -<> requests expected the whole body to be a -query. These now _require_ a top-level `query` parameter: - -[source,js] ---------------- -GET /_count -{ - "query": { - "match": { - "title": "Interesting stuff" - } - } -} ---------------- - -Also, the top-level `filter` parameter in search has been renamed to -<>, to indicate that it should not -be used as the primary way to filter search results (use a -<> instead), but only to filter -results AFTER aggregations have been calculated. - -This example counts the top colors in all matching docs, but only returns docs -with color `red`: - -[source,js] ---------------- -GET /_search -{ - "query": { - "match_all": {} - }, - "aggs": { - "colors": { - "terms": { "field": "color" } - } - }, - "post_filter": { - "term": { - "color": "red" - } - } -} ---------------- - -=== Multi-fields - -Multi-fields are dead! Long live multi-fields! Well, the field type -`multi_field` has been removed. Instead, any of the core field types -(excluding `object` and `nested`) now accept a `fields` parameter. It's the -same thing, but nicer. 
Instead of: - -[source,js] ---------------- -"title": { - "type": "multi_field", - "fields": { - "title": { "type": "string" }, - "raw": { "type": "string", "index": "not_analyzed" } - } -} ---------------- - -you can now write: - -[source,js] ---------------- -"title": { - "type": "string", - "fields": { - "raw": { "type": "string", "index": "not_analyzed" } - } -} ---------------- - -Existing multi-fields will be upgraded to the new format automatically. - -Also, instead of having to use the arcane `path` and `index_name` parameters -in order to index multiple fields into a single ``custom +_all+ field'', you -can now use the <>. - -=== Stopwords - -Previously, the <> and -<> analyzers used the list of English stopwords -by default, which caused some hard to debug indexing issues. Now they are set to -use the empty stopwords list (ie `_none_`) instead. - -=== Dates without years - -When dates are specified without a year, for example: `Dec 15 10:00:00` they -are treated as dates in 2000 during indexing and range searches... except for -the upper included bound `lte` where they were treated as dates in 1970! Now, -all https://github.com/elastic/elasticsearch/issues/4451[dates without years] -use `1970` as the default. - -=== Parameters - -* Geo queries used to use `miles` as the default unit. And we - http://en.wikipedia.org/wiki/Mars_Climate_Orbiter[all know what - happened at NASA] because of that decision. The new default unit is - https://github.com/elastic/elasticsearch/issues/4515[`meters`]. - -* For all queries that support _fuzziness_, the `min_similarity`, `fuzziness` - and `edit_distance` parameters have been unified as the single parameter - `fuzziness`. See <> for details of accepted values. - -* The `ignore_missing` parameter has been replaced by the `expand_wildcards`, - `ignore_unavailable` and `allow_no_indices` parameters, all of which have - sensible defaults. See <> for more. - -* An index name (or pattern) is now required for destructive operations like - deleting indices: -+ -[source,sh] ---------------- -# v0.90 - delete all indices: -DELETE / - -# v1.0 - delete all indices: -DELETE /_all -DELETE /* ---------------- -+ -Setting `action.destructive_requires_name` to `true` provides further safety -by disabling wildcard expansion on destructive actions. - -=== Return values - -* The `ok` return value has been removed from all response bodies as it added - no useful information. - -* The `found`, `not_found` and `exists` return values have been unified as - `found` on all relevant APIs. - -* Field values, in response to the <> - parameter, are now always returned as arrays. A field could have single or - multiple values, which meant that sometimes they were returned as scalars - and sometimes as arrays. By always returning arrays, this simplifies user - code. The only exception to this rule is when `fields` is used to retrieve - metadata like the `routing` value, which are always singular. Metadata - fields are always returned as scalars. -+ -The `fields` parameter is intended to be used for retrieving stored fields, -rather than for fields extracted from the `_source`. That means that it can no -longer be used to return whole objects and it no longer accepts the -`_source.fieldname` format. For these you should use the -<> -parameters instead. 
- -* Settings, like `index.analysis.analyzer.default` are now returned as proper - nested JSON objects, which makes them easier to work with programmatically: -+ -[source,js] ---------------- -{ - "index": { - "analysis": { - "analyzer": { - "default": xxx - } - } - } -} ---------------- -+ -You can choose to return them in flattened format by passing `?flat_settings` -in the query string. - -* The <> API no longer supports the text response - format, but does support JSON and YAML. - -=== Deprecations - -* The `text` query has been removed. Use the - <> query instead. - -* The `field` query has been removed. Use the - <> query instead. - -* Per-document boosting with the `_boost` field has - been removed. You can use the - <> instead. - -* The `path` parameter in mappings has been deprecated. Use the - <> parameter instead. - -* The `custom_score` and `custom_boost_score` is no longer supported. You can - use <> instead. - -=== Percolator - -The percolator has been redesigned and because of this the dedicated `_percolator` index is no longer used by the percolator, -but instead the percolator works with a dedicated `.percolator` type. Read the http://www.elastic.co/blog/percolator-redesign-blog-post[redesigned percolator] -blog post for the reasons why the percolator has been redesigned. - -Elasticsearch will *not* delete the `_percolator` index when upgrading, only the percolate api will not use the queries -stored in the `_percolator` index. In order to use the already stored queries, you can just re-index the queries from the -`_percolator` index into any index under the reserved `.percolator` type. The format in which the percolate queries -were stored has *not* been changed. So a simple script that does a scan search to retrieve all the percolator queries -and then does a bulk request into another index should be sufficient. diff --git a/docs/reference/migration/migrate_1_4.asciidoc b/docs/reference/migration/migrate_1_4.asciidoc deleted file mode 100644 index c20504bbddf..00000000000 --- a/docs/reference/migration/migrate_1_4.asciidoc +++ /dev/null @@ -1,92 +0,0 @@ -[[breaking-changes-1.4]] -== Breaking changes in 1.4 - -This section discusses the changes that you need to be aware of when migrating -your application from Elasticsearch 1.x to Elasticsearch 1.4. - -[float] -=== Percolator - -In indices created with version `1.4.0` or later, percolation queries can only -refer to fields that already exist in the mappings in that index. There are -two ways to make sure that a field mapping exist: - -* Add or update a mapping via the <> or - <> apis. -* Percolate a document before registering a query. Percolating a document can - add field mappings dynamically, in the same way as happens when indexing a - document. - -[float] -=== Aliases - -<> can include <> which -are automatically applied to any search performed via the alias. -<> created with version `1.4.0` or later can only -refer to field names which exist in the mappings of the index (or indices) -pointed to by the alias. - -Add or update a mapping via the <> or -<> apis. - -[float] -=== Indices APIs - -The get warmer api will return a section for `warmers` even if there are -no warmers. 
This ensures that the following two examples are equivalent: - -[source,js] --------------------------------------------------- -curl -XGET 'http://localhost:9200/_all/_warmers' - -curl -XGET 'http://localhost:9200/_warmers' --------------------------------------------------- - -The <> will return a section for `aliases` even if there are -no aliases. This ensures that the following two examples are equivalent: - -[source,js] --------------------------------------------------- -curl -XGET 'http://localhost:9200/_all/_aliases' - -curl -XGET 'http://localhost:9200/_aliases' --------------------------------------------------- - -The <> will return a section for `mappings` even if there are -no mappings. This ensures that the following two examples are equivalent: - -[source,js] --------------------------------------------------- -curl -XGET 'http://localhost:9200/_all/_mappings' - -curl -XGET 'http://localhost:9200/_mappings' --------------------------------------------------- - -[float] -=== Bulk UDP - -Bulk UDP has been deprecated and will be removed in 2.0. -You should use <> instead. -Each cluster must have an elected master node in order to be fully operational. Once a node loses its elected master -node it will reject some or all operations. - -[float] -=== Zen discovery - -On versions before `1.4.0.Beta1` all operations are rejected when a node loses its elected master. From `1.4.0.Beta1` -only write operations will be rejected by default. Read operations will still be served based on the information available -to the node, which may result in being partial and possibly also stale. If the default is undesired then the -pre `1.4.0.Beta1` behaviour can be enabled, see: <> - -[float] -=== More Like This Field - -The More Like This Field query has been deprecated in favor of the <> -restrained set to a specific `field`. It will be removed in 2.0. - -[float] -=== MVEL is deprecated - -Groovy is the new default scripting language in Elasticsearch, and is enabled in `sandbox` mode -by default. MVEL has been removed from core, but is available as a plugin: -https://github.com/elastic/elasticsearch-lang-mvel diff --git a/docs/reference/migration/migrate_1_6.asciidoc b/docs/reference/migration/migrate_1_6.asciidoc deleted file mode 100644 index 9540d3b6759..00000000000 --- a/docs/reference/migration/migrate_1_6.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -[[breaking-changes-1.6]] -== Breaking changes in 1.6 - -This section discusses the changes that you need to be aware of when migrating -your application from Elasticsearch 1.x to Elasticsearch 1.6. - -[float] -=== More Like This API - -The More Like This API query has been deprecated and will be removed in 2.0. Instead use the <>. - -[float] -=== `top_children` query - -The `top_children` query has been deprecated and will be removed in 2.0. Instead the `has_child` query should be used. -The `top_children` query isn't always faster than the `has_child` query and the `top_children` query is often inaccurate. -The total hits and any aggregations in the same search request will likely be off. diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc deleted file mode 100644 index adf12e7da5c..00000000000 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ /dev/null @@ -1,73 +0,0 @@ -[[breaking-changes-2.0]] -== Breaking changes in 2.0 - -This section discusses the changes that you need to be aware of when migrating -your application to Elasticsearch 2.0. 
- -[float] -=== Indices created before 0.90 - -Elasticsearch 2.0 can read indices created in version 0.90 and above. If any -of your indices were created before 0.90 you will need to upgrade to the -latest 1.x version of Elasticsearch first, in order to upgrade your indices or -to delete the old indices. Elasticsearch will not start in the presence of old -indices. - -[float] -=== Elasticsearch migration plugin - -We have provided the https://github.com/elastic/elasticsearch-migration[Elasticsearch migration plugin] -to help you detect any issues that you may have when upgrading to -Elasticsearch 2.0. Please install and run the plugin *before* upgrading. - -[float] -=== Also see - -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> - -include::migrate_2_0/removals.asciidoc[] - -include::migrate_2_0/network.asciidoc[] - -include::migrate_2_0/striping.asciidoc[] - -include::migrate_2_0/mapping.asciidoc[] - -include::migrate_2_0/crud.asciidoc[] - -include::migrate_2_0/query_dsl.asciidoc[] - -include::migrate_2_0/search.asciidoc[] - -include::migrate_2_0/aggs.asciidoc[] - -include::migrate_2_0/parent_child.asciidoc[] - -include::migrate_2_0/scripting.asciidoc[] - -include::migrate_2_0/index_apis.asciidoc[] - -include::migrate_2_0/snapshot_restore.asciidoc[] - -include::migrate_2_0/packaging.asciidoc[] - -include::migrate_2_0/settings.asciidoc[] - -include::migrate_2_0/stats.asciidoc[] - -include::migrate_2_0/java.asciidoc[] diff --git a/docs/reference/migration/migrate_2_0/aggs.asciidoc b/docs/reference/migration/migrate_2_0/aggs.asciidoc deleted file mode 100644 index 1351b4cb4a3..00000000000 --- a/docs/reference/migration/migrate_2_0/aggs.asciidoc +++ /dev/null @@ -1,70 +0,0 @@ -[[breaking_20_aggregation_changes]] -=== Aggregation changes - -==== Min doc count defaults to zero - -Both the `histogram` and `date_histogram` aggregations now have a default -`min_doc_count` of `0` instead of `1`. - -==== Timezone for date field - -Specifying the `time_zone` parameter in queries or aggregations on fields of -type `date` must now be either an ISO 8601 UTC offset, or a timezone id. For -example, the value `+1:00` must now be written as `+01:00`. - -==== Time zones and offsets - -The `histogram` and the `date_histogram` aggregation now support a simplified -`offset` option that replaces the previous `pre_offset` and `post_offset` -rounding options. Instead of having to specify two separate offset shifts of -the underlying buckets, the `offset` option moves the bucket boundaries in -positive or negative direction depending on its argument. - -The `date_histogram` options for `pre_zone` and `post_zone` are replaced by -the `time_zone` option. The behavior of `time_zone` is equivalent to the -former `pre_zone` option. Setting `time_zone` to a value like "+01:00" now -will lead to the bucket calculations being applied in the specified time zone. -The `key` is returned as the timestamp in UTC, but the `key_as_string` is -returned in the time zone specified. - -In addition to this, the `pre_zone_adjust_large_interval` is removed because -we now always return dates and bucket keys in UTC. - -==== Including/excluding terms - -`include`/`exclude` filtering on the `terms` aggregation now uses the same -syntax as <> instead of the Java regular -expression syntax. While simple regexps should still work, more complex ones -might need some rewriting. Also, the `flags` parameter is no longer supported. 
- -==== Boolean fields - -Aggregations on `boolean` fields will now return `0` and `1` as keys, and -`"true"` and `"false"` as string keys. See <> for more -information. - - -==== Java aggregation classes - -The `date_histogram` aggregation now returns a `Histogram` object in the -response, and the `DateHistogram` class has been removed. Similarly the -`date_range`, `ipv4_range`, and `geo_distance` aggregations all return a -`Range` object in the response, and the `IPV4Range`, `DateRange`, and -`GeoDistance` classes have been removed. - -The motivation for this is to have a single response API for the Range and -Histogram aggregations regardless of the type of data being queried. To -support this some changes were made in the `MultiBucketAggregation` interface -which applies to all bucket aggregations: - -* The `getKey()` method now returns `Object` instead of `String`. The actual - object type returned depends on the type of aggregation requested (e.g. the - `date_histogram` will return a `DateTime` object for this method whereas a - `histogram` will return a `Number`). -* A `getKeyAsString()` method has been added to return the String - representation of the key. -* All other `getKeyAsX()` methods have been removed. -* The `getBucketAsKey(String)` methods have been removed on all aggregations - except the `filters` and `terms` aggregations. - - diff --git a/docs/reference/migration/migrate_2_0/crud.asciidoc b/docs/reference/migration/migrate_2_0/crud.asciidoc deleted file mode 100644 index ef3ba93e67e..00000000000 --- a/docs/reference/migration/migrate_2_0/crud.asciidoc +++ /dev/null @@ -1,130 +0,0 @@ -[[breaking_20_crud_and_routing_changes]] -=== CRUD and routing changes - -==== Explicit custom routing - -Custom `routing` values can no longer be extracted from the document body, but -must be specified explicitly as part of the query string, or in the metadata -line in the <> API. See <> for an -example. - -==== Routing hash function - -The default hash function that is used for routing has been changed from -`djb2` to `murmur3`. This change should be transparent unless you relied on -very specific properties of `djb2`. This will help ensure a better balance of -the document counts between shards. - -In addition, the following routing-related node settings have been deprecated: - -`cluster.routing.operation.hash.type`:: - - This was an undocumented setting that allowed to configure which hash function - to use for routing. `murmur3` is now enforced on new indices. - -`cluster.routing.operation.use_type`:: - - This was an undocumented setting that allowed to take the `_type` of the - document into account when computing its shard (default: `false`). `false` is - now enforced on new indices. - -==== Delete API with custom routing - -The delete API used to be broadcast to all shards in the index which meant -that, when using custom routing, the `routing` parameter was optional. Now, -the delete request is forwarded only to the shard holding the document. If you -are using custom routing then you should specify the `routing` value when -deleting a document, just as is already required for the `index`, `create`, -and `update` APIs. 
- -To make sure that you never forget a routing value, make routing required with -the following mapping: - -[source,js] ---------------------------- -PUT my_index -{ - "mappings": { - "my_type": { - "_routing": { - "required": true - } - } - } -} ---------------------------- - -==== All stored meta-fields returned by default - -Previously, meta-fields like `_routing`, `_timestamp`, etc would only be -included in a GET request if specifically requested with the `fields` -parameter. Now, all meta-fields which have stored values will be returned by -default. Additionally, they are now returned at the top level (along with -`_index`, `_type`, and `_id`) instead of in the `fields` element. - -For instance, the following request: - -[source,sh] ---------------- -GET /my_index/my_type/1 ---------------- - -might return: - -[source,js] ---------------- -{ - "_index": "my_index", - "_type": "my_type", - "_id": "1", - "_timestamp": 10000000, <1> - "_source": { - "foo" : [ "bar" ] - } -} ---------------- -<1> The `_timestamp` is returned by default, and at the top level. - - -==== Async replication - -The `replication` parameter has been removed from all CRUD operations -(`index`, `create`, `update`, `delete`, `bulk`) as it interfered with the -<> feature. These operations are now -synchronous only and a request will only return once the changes have been -replicated to all active shards in the shard group. - -Instead, use more client processes to send more requests in parallel. - -==== Documents must be specified without a type wrapper - -Previously, the document body could be wrapped in another object with the name -of the `type`: - -[source,js] --------------------------- -PUT my_index/my_type/1 -{ - "my_type": { <1> - "text": "quick brown fox" - } -} --------------------------- -<1> This `my_type` wrapper is not part of the document itself, but represents the document type. - -This feature was deprecated before but could be reenabled with the -`mapping.allow_type_wrapper` index setting. This setting is no longer -supported. The above document should be indexed as follows: - -[source,js] --------------------------- -PUT my_index/my_type/1 -{ - "text": "quick brown fox" -} --------------------------- - -==== Term Vectors API - -Usage of `/_termvector` is deprecated in favor of `/_termvectors`. - diff --git a/docs/reference/migration/migrate_2_0/index_apis.asciidoc b/docs/reference/migration/migrate_2_0/index_apis.asciidoc deleted file mode 100644 index c177a887866..00000000000 --- a/docs/reference/migration/migrate_2_0/index_apis.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -[[breaking_20_index_api_changes]] -=== Index API changes - -==== Index aliases - - -Fields used in alias filters no longer have to exist in the mapping at alias -creation time. Previously, alias filters were parsed at alias creation time -and the parsed form was cached in memory. Now, alias filters are parsed at -request time and the fields in filters are resolved from the current mapping. - -This also means that index aliases now support `has_parent` and `has_child` -queries. - -The <> will now throw an exception if no -matching aliases are found. This change brings the defaults for this API in -line with the other Indices APIs. The <> options can be used on a -request to change this behavior. - -==== File based index templates - -Index templates can no longer be configured on disk. Use the -<> API instead. - -==== Analyze API changes - - -The Analyze API now returns the `position` of the first token as `0` -instead of `1`. 
- -The `prefer_local` parameter has been removed. The `_analyze` API is a light -operation and the caller shouldn't be concerned about whether it executes on -the node that receives the request or another node. - -The `text()` method on `AnalyzeRequest` now returns `String[]` instead of -`String`. - -==== Removed `id_cache` from clear cache api - -The <> API no longer supports the `id_cache` -option. Instead, use the `fielddata` option to clear the cache for the -`_parent` field. - diff --git a/docs/reference/migration/migrate_2_0/java.asciidoc b/docs/reference/migration/migrate_2_0/java.asciidoc deleted file mode 100644 index b2f5ee63e0d..00000000000 --- a/docs/reference/migration/migrate_2_0/java.asciidoc +++ /dev/null @@ -1,147 +0,0 @@ -[[breaking_20_java_api_changes]] -=== Java API changes - -==== Transport API construction - -The `TransportClient` construction code has changed, it now uses the builder -pattern. Instead of: - -[source,java] --------------------------------------------------- -Settings settings = Settings.settingsBuilder() - .put("cluster.name", "myClusterName").build(); -Client client = new TransportClient(settings); --------------------------------------------------- - -Use the following: - -[source,java] --------------------------------------------------- -Settings settings = Settings.settingsBuilder() - .put("cluster.name", "myClusterName").build(); -Client client = TransportClient.builder().settings(settings).build(); --------------------------------------------------- - -The transport client also no longer supports loading settings from config files. -If you have a config file, you can load it into settings yourself before -constructing the transport client: - -[source,java] --------------------------------------------------- -Settings settings = Settings.settingsBuilder() - .loadFromPath(pathToYourSettingsFile).build(); -Client client = TransportClient.builder().settings(settings).build(); --------------------------------------------------- - -==== Exception are only thrown on total failure - -Previously, many APIs would throw an exception if any shard failed to execute -the request. Now the exception is only thrown if all shards fail the request. -The responses for these APIs will always have a `getShardFailures` method that -you can and should check for failures. - - -==== IndexMissingException removed. - -Use `IndexNotFoundException` instead. - - -==== Automatically thread client listeners - -Previously, the user had to set request listener threads to `true` when on the -client side in order not to block IO threads on heavy operations. This proved -to be very trappy for users, and ended up creating problems that are very hard -to debug. - -In 2.0, Elasticsearch automatically threads listeners that are used from the -client when the client is a node client or a transport client. Threading can -no longer be manually set. - - -==== Query/filter refactoring - -`org.elasticsearch.index.queries.FilterBuilders` has been removed as part of the merge of -queries and filters. These filters are now available in `QueryBuilders` with the same name. -All methods that used to accept a `FilterBuilder` now accept a `QueryBuilder` instead. 
- -In addition some query builders have been removed or renamed: - -* `commonTerms(...)` renamed with `commonTermsQuery(...)` -* `queryString(...)` renamed with `queryStringQuery(...)` -* `simpleQueryString(...)` renamed with `simpleQueryStringQuery(...)` -* `textPhrase(...)` removed -* `textPhrasePrefix(...)` removed -* `textPhrasePrefixQuery(...)` removed -* `filtered(...)` removed. Use `filteredQuery(...)` instead. -* `inQuery(...)` removed. - -==== GetIndexRequest - -`GetIndexRequest.features()` now returns an array of Feature Enums instead of an array of String values. - -The following deprecated methods have been removed: - -* `GetIndexRequest.addFeatures(String[])` - Use - `GetIndexRequest.addFeatures(Feature[])` instead - -* `GetIndexRequest.features(String[])` - Use - `GetIndexRequest.features(Feature[])` instead. - -* `GetIndexRequestBuilder.addFeatures(String[])` - Use - `GetIndexRequestBuilder.addFeatures(Feature[])` instead. - -* `GetIndexRequestBuilder.setFeatures(String[])` - Use - `GetIndexRequestBuilder.setFeatures(Feature[])` instead. - - -==== BytesQueryBuilder removed - -The redundant BytesQueryBuilder has been removed in favour of the -WrapperQueryBuilder internally. - -==== TermsQueryBuilder execution removed - -The `TermsQueryBuilder#execution` method has been removed as it has no effect, it is ignored by the - corresponding parser. - -==== ImmutableSettings removed - -Use `Settings.builder()` instead of `ImmutableSettings.builder()`. - -==== InetSocketTransportAddress removed - -Use `InetSocketTransportAddress(InetSocketAddress address)` instead of `InetSocketTransportAddress(String, int)`. -You can create an InetSocketAddress instance with `InetSocketAddress(String, int)`. For example: - -[source,java] ------------------------------ -new InetSocketTransportAddress(new InetSocketAddress("127.0.0.1", 0)); ------------------------------ - -==== Request Builders refactoring - -An `action` parameter has been added to various request builders: - -* Instead of `new SnapshotsStatusRequestBuilder(elasticSearchClient)` use `new SnapshotsStatusRequestBuilder(elasticSearchClient, SnapshotsStatusAction.INSTANCE)`. - -* Instead of `new CreateSnapshotRequestBuilder(elasticSearchClient)` use `new CreateSnapshotRequestBuilder(elasticSearchClient, CreateSnapshotAction.INSTANCE)`. - -* Instead of `new CreateIndexRequestBuilder(elasticSearchClient, index)` use `new CreateIndexRequestBuilder(elasticSearchClient, CreateIndexAction.INSTANCE, index)`. - -==== Shading and package relocation removed - -Elasticsearch used to shade its dependencies and to relocate packages. We no longer use shading or relocation. 
-You might need to change your imports to the original package names: - -* `com.google.common` was `org.elasticsearch.common` -* `com.carrotsearch.hppc` was `org.elasticsearch.common.hppc` -* `jsr166e` was `org.elasticsearch.common.util.concurrent.jsr166e` -* `com.fasterxml.jackson` was `org.elasticsearch.common.jackson` -* `org.joda.time` was `org.elasticsearch.common.joda.time` -* `org.joda.convert` was `org.elasticsearch.common.joda.convert` -* `org.jboss.netty` was `org.elasticsearch.common.netty` -* `com.ning.compress` was `org.elasticsearch.common.compress` -* `com.github.mustachejava` was `org.elasticsearch.common.mustache` -* `com.tdunning.math.stats` was `org.elasticsearch.common.stats` -* `org.apache.commons.lang` was `org.elasticsearch.common.lang` -* `org.apache.commons.cli` was `org.elasticsearch.common.cli.commons` diff --git a/docs/reference/migration/migrate_2_0/mapping.asciidoc b/docs/reference/migration/migrate_2_0/mapping.asciidoc deleted file mode 100644 index b4ee0d54412..00000000000 --- a/docs/reference/migration/migrate_2_0/mapping.asciidoc +++ /dev/null @@ -1,439 +0,0 @@ -[[breaking_20_mapping_changes]] -=== Mapping changes - -A number of changes have been made to mappings to remove ambiguity and to -ensure that conflicting mappings cannot be created. - -One major change is that dynamically added fields must have their mapping -confirmed by the master node before indexing continues. This is to avoid a -problem where different shards in the same index dynamically add different -mappings for the same field. These conflicting mappings can silently return -incorrect results and can lead to index corruption. - -This change can make indexing slower when frequently adding many new fields. -We are looking at ways of optimising this process but we chose safety over -performance for this extreme use case. - -==== Conflicting field mappings - -Fields with the same name, in the same index, in different types, must have -the same mapping, with the exception of the <>, <>, -<>, <>, <>, and <> -parameters, which may have different settings per field. - -[source,js] ---------------- -PUT my_index -{ - "mappings": { - "type_one": { - "properties": { - "name": { <1> - "type": "string" - } - } - }, - "type_two": { - "properties": { - "name": { <1> - "type": "string", - "analyzer": "english" - } - } - } - } -} ---------------- -<1> The two `name` fields have conflicting mappings and will prevent Elasticsearch - from starting. - -Elasticsearch will not start in the presence of conflicting field mappings. -These indices must be deleted or reindexed using a new mapping. - -The `ignore_conflicts` option of the put mappings API has been removed. -Conflicts can't be ignored anymore. - -==== Fields cannot be referenced by short name - -A field can no longer be referenced using its short name. Instead, the full -path to the field is required. For instance: - -[source,js] ---------------- -PUT my_index -{ - "mappings": { - "my_type": { - "properties": { - "title": { "type": "string" }, <1> - "name": { - "properties": { - "title": { "type": "string" }, <2> - "first": { "type": "string" }, - "last": { "type": "string" } - } - } - } - } - } -} ---------------- -<1> This field is referred to as `title`. -<2> This field is referred to as `name.title`. - -Previously, the two `title` fields in the example above could have been -confused with each other when using the short name `title`. 
-
-==== Type name prefix removed
-
-Previously, two fields with the same name in two different types could
-sometimes be disambiguated by prepending the type name. As a side effect, it
-would add a filter on the type name to the relevant query. This feature was
-ambiguous -- a type name could be confused with a field name -- and didn't
-work everywhere, e.g. in aggregations.
-
-Instead, fields should be specified with the full path, but without a type
-name prefix. If you wish to filter by the `_type` field, either specify the
-type in the URL or add an explicit filter.
-
-The following example query in 1.x:
-
-[source,js]
----------------------------
-GET my_index/_search
-{
-  "query": {
-    "match": {
-      "my_type.some_field": "quick brown fox"
-    }
-  }
-}
----------------------------
-
-would be rewritten in 2.0 as:
-
-[source,js]
----------------------------
-GET my_index/my_type/_search <1>
-{
-  "query": {
-    "match": {
-      "some_field": "quick brown fox" <2>
-    }
-  }
-}
----------------------------
-<1> The type name can be specified in the URL to act as a filter.
-<2> The field name should be specified without the type prefix.
-
-==== Field names may not contain dots
-
-In 1.x, it was possible to create fields with dots in their name, for
-instance:
-
-[source,js]
----------------------------
-PUT my_index
-{
-  "mappings": {
-    "my_type": {
-      "properties": {
-        "foo.bar": { <1>
-          "type": "string"
-        },
-        "foo": {
-          "properties": {
-            "bar": { <1>
-              "type": "string"
-            }
-          }
-        }
-      }
-    }
-  }
-}
----------------------------
-<1> These two fields cannot be distinguished as both are referred to as `foo.bar`.
-
-You can no longer create fields with dots in the name.
-
-==== Type names may not start with a dot
-
-In 1.x, Elasticsearch would issue a warning if a type name included a dot,
-e.g. `my.type`. Now that type names are no longer used to distinguish between
-fields in different types, this warning has been relaxed: type names may now
-contain dots, but they may not *begin* with a dot. The only exception to this
-is the special `.percolator` type.
-
-==== Type names may not be longer than 255 characters
-
-Mapping type names may not be longer than 255 characters. Long type names
-will continue to function on indices created before upgrade, but it will not
-be possible to create types with long names in new indices.
-
-==== Types may no longer be deleted
-
-In 1.x it was possible to delete a type mapping, along with all of the
-documents of that type, using the delete mapping API. This is no longer
-supported, because remnants of the fields in the type could remain in the
-index, causing corruption later on.
-
-Instead, if you need to delete a type mapping, you should reindex to a new
-index which does not contain the mapping. If you just need to delete the
-documents that belong to that type, then use the delete-by-query plugin
-instead.
-
-[[migration-meta-fields]]
-==== Type meta-fields
-
-The <> associated with each type have had configuration options
-removed, to make them more reliable:
-
-* `_id` configuration can no longer be changed. If you need to sort, use the <> field instead.
-* `_type` configuration can no longer be changed.
-* `_index` configuration can no longer be changed.
-* `_routing` configuration is limited to marking routing as required.
-* `_field_names` configuration is limited to disabling the field.
-* `_size` configuration is limited to enabling the field.
-* `_timestamp` configuration is limited to enabling the field, and setting the format and default value.
-* `_boost` has been removed. 
-* `_analyzer` has been removed.
-
-Importantly, *meta-fields can no longer be specified as part of the document
-body.* Instead, they must be specified in the query string parameters. For
-instance, in 1.x, the `routing` could be specified as follows:
-
-[source,json]
------------------------------
-PUT my_index
-{
-  "mappings": {
-    "my_type": {
-      "_routing": {
-        "path": "group" <1>
-      },
-      "properties": {
-        "group": { <1>
-          "type": "string"
-        }
-      }
-    }
-  }
-}
-
-PUT my_index/my_type/1 <2>
-{
-  "group": "foo"
-}
------------------------------
-<1> This 1.x mapping tells Elasticsearch to extract the `routing` value from the `group` field in the document body.
-<2> This indexing request uses a `routing` value of `foo`.
-
-In 2.0, the routing must be specified explicitly:
-
-[source,json]
------------------------------
-PUT my_index
-{
-  "mappings": {
-    "my_type": {
-      "_routing": {
-        "required": true <1>
-      },
-      "properties": {
-        "group": {
-          "type": "string"
-        }
-      }
-    }
-  }
-}
-
-PUT my_index/my_type/1?routing=bar <2>
-{
-  "group": "foo"
-}
------------------------------
-<1> Routing can be marked as required to ensure it is not forgotten during indexing.
-<2> This indexing request uses a `routing` value of `bar`.
-
-==== `_timestamp` and `_ttl` deprecated
-
-The `_timestamp` and `_ttl` fields are deprecated, but will remain functional
-for the remainder of the 2.x series.
-
-Instead of the `_timestamp` field, use a normal <> field and set
-the value explicitly.
-
-The current `_ttl` functionality will be replaced in a future version with a
-new implementation of TTL, possibly with different semantics, and will not
-depend on the `_timestamp` field.
-
-==== Analyzer mappings
-
-Previously, `index_analyzer` and `search_analyzer` could be set separately,
-while the `analyzer` setting would set both. The `index_analyzer` setting has
-been removed in favour of just using the `analyzer` setting.
-
-If just the `analyzer` is set, it will be used at index time and at search time. To use a different analyzer at search time, specify both the `analyzer` and a `search_analyzer`.
-
-The `index_analyzer`, `search_analyzer`, and `analyzer` type-level settings
-have also been removed, as it is no longer possible to select fields based on
-the type name.
-
-The `_analyzer` meta-field, which allowed setting an analyzer per document,
-has also been removed. It will be ignored on older indices.
-
-==== Date fields and Unix timestamps
-
-Previously, `date` fields would first try to parse values as a Unix timestamp
--- milliseconds-since-the-epoch -- before trying to use their defined date
-`format`. This meant that formats like `yyyyMMdd` could never work, as values
-would be interpreted as timestamps.
-
-In 2.0, we have added two formats: `epoch_millis` and `epoch_second`. Only
-date fields that use these formats will be able to parse timestamps.
-
-These formats cannot be used in dynamic templates, because they are
-indistinguishable from long values.
-
-==== Default date format
-
-The default date format has changed from `date_optional_time` to
-`strict_date_optional_time`, which expects a 4-digit year, a 2-digit month
-and day, and, optionally, a 2-digit hour, minute, and second.
-
-A dynamically added date field, by default, includes the `epoch_millis`
-format to support timestamp parsing. For instance:
-
-[source,js]
-------------------------
-PUT my_index/my_type/1
-{
-  "date_one": "2015-01-01" <1>
-}
-------------------------
-<1> Has `format`: `"strict_date_optional_time||epoch_millis"`. 
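-
-The same behaviour can be spelled out explicitly in the mapping. A minimal
-sketch, reusing the `my_type` type from the earlier examples:
-
-[source,js]
-------------------------
-PUT my_index
-{
-  "mappings": {
-    "my_type": {
-      "properties": {
-        "date_one": {
-          "type": "date",
-          "format": "strict_date_optional_time||epoch_millis" <1>
-        }
-      }
-    }
-  }
-}
-------------------------
-<1> Accepts both ISO-style date strings and millisecond timestamps.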
-
-==== `mapping.date.round_ceil` setting
-
-The `mapping.date.round_ceil` setting for date math parsing has been removed.
-
-[[migration-bool-fields]]
-==== Boolean fields
-
-Boolean fields used to have a string fielddata with `F` meaning `false` and `T`
-meaning `true`. They have been refactored to use numeric fielddata, with `0`
-for `false` and `1` for `true`. As a consequence, the format of the responses of
-the following APIs changed when applied to boolean fields: `0`/`1` is returned
-instead of `F`/`T`:
-
-* <>
-* <>
-* <>
-
-In addition, terms aggregations use a custom formatter for boolean (like for
-dates and ip addresses, which are also backed by numbers) in order to return
-the user-friendly representation of boolean fields: `false`/`true`:
-
-[source,js]
---------------
-"buckets": [
-  {
-     "key": 0,
-     "key_as_string": "false",
-     "doc_count": 42
-  },
-  {
-     "key": 1,
-     "key_as_string": "true",
-     "doc_count": 12
-  }
-]
---------------
-
-==== `index_name` and `path` removed
-
-The `index_name` setting was used to change the name of the Lucene field,
-and the `path` setting was used on `object` fields to determine whether the
-Lucene field should use the full path (including parent object fields), or
-just the final `name`.
-
-These settings have been removed as their purpose is better served with the
-<> parameter.
-
-==== Murmur3 Fields
-
-Fields of type `murmur3` can no longer change the `doc_values` or `index` settings.
-They are always mapped as follows:
-
-[source,js]
---------------------
-{
-  "type": "murmur3",
-  "index": "no",
-  "doc_values": true
-}
---------------------
-
-==== Mappings in config files not supported
-
-The ability to specify mappings in configuration files has been removed. To
-specify default mappings that apply to multiple indexes, use
-<> instead.
-
-Along with this change, the following settings have been removed:
-
-* `index.mapper.default_mapping_location`
-* `index.mapper.default_percolator_mapping_location`
-
-==== Fielddata formats
-
-Now that doc values are the default for fielddata, specialized in-memory
-formats have become an esoteric option. These fielddata formats have been removed:
-
-* `fst` on string fields
-* `compressed` on geo points
-
-The default fielddata format will be used instead.
-
-==== Posting and doc-values codecs
-
-It is no longer possible to specify per-field postings and doc values formats
-in the mappings. This setting will be ignored on indices created before 2.0
-and will cause mapping parsing to fail on indices created on or after 2.0. For
-old indices, this means that new segments will be written with the default
-postings and doc values formats of the current codec.
-
-It is still possible to change the whole codec by using the `index.codec`
-setting. Note, however, that using a non-default codec is discouraged as
-it could prevent future versions of Elasticsearch from being able to read the
-index.
-
-==== Compress and compress threshold
-
-The `compress` and `compress_threshold` options have been removed from the
-`_source` field and fields of type `binary`. These fields are compressed by
-default. If you would like to increase compression levels, use the new
-<> setting instead.
-
-==== `position_offset_gap`
-
-The `position_offset_gap` option has been renamed to `position_increment_gap` to
-avoid confusion: Elasticsearch's `position_increment_gap` now maps directly to
-Lucene's `position_increment_gap`.
-
-The default `position_increment_gap` is now 100. 
Indices created in Elasticsearch
-2.0.0 will default to using 100 and indices created before that will continue
-to use the old default of 0. This was done to prevent phrase queries from
-matching across different values of the same term unexpectedly. Specifically,
-100 was chosen to cause phrase queries with slops up to 99 to match only within
-a single value of a field.
-
-==== copy_to and multi fields
-
-A <> within a <> is ignored from version 2.0 on. In any version after
-2.1 or 2.0.1, creating a mapping that has a `copy_to` within a multi field will result
-in an exception.
-
-
diff --git a/docs/reference/migration/migrate_2_0/network.asciidoc b/docs/reference/migration/migrate_2_0/network.asciidoc
deleted file mode 100644
index d493bff5688..00000000000
--- a/docs/reference/migration/migrate_2_0/network.asciidoc
+++ /dev/null
@@ -1,39 +0,0 @@
-[[breaking_20_network_changes]]
-=== Network changes
-
-==== Bind to localhost
-
-Elasticsearch 2.x will only bind to localhost by default. It will try to bind
-to both 127.0.0.1 (IPv4) and [::1] (IPv6), but will work happily in
-environments where only IPv4 or IPv6 is available. This change prevents
-Elasticsearch from trying to connect to other nodes on your network unless you
-specifically tell it to do so. When moving to production you should configure
-the `network.host` parameter, either in the `elasticsearch.yml` config file or
-on the command line:
-
-[source,sh]
--------------------
-bin/elasticsearch --network.host 192.168.1.5
-bin/elasticsearch --network.host _non_loopback_
--------------------
-
-The full list of options that `network.host` accepts can be found in the <>.
-
-==== Unicast discovery
-
-When bound to localhost, Elasticsearch will use unicast to contact
-the first 5 ports in the `transport.tcp.port` range, which defaults to
-`9300-9400`. This preserves the zero-config auto-clustering experience for the developer,
-but it means that you will have to provide a list of <>
-when moving to production, for instance:
-
-[source,yaml]
---------------------
-discovery.zen.ping.unicast.hosts: [ 192.168.1.2, 192.168.1.3 ]
---------------------
-
-You don’t need to list all of the nodes in your cluster as unicast hosts, but
-you should specify at least a quorum (majority) of master-eligible nodes. A
-big cluster will typically have three dedicated master nodes, in which case we
-recommend listing all three of them as unicast hosts.
-
diff --git a/docs/reference/migration/migrate_2_0/packaging.asciidoc b/docs/reference/migration/migrate_2_0/packaging.asciidoc
deleted file mode 100644
index dae87187ba4..00000000000
--- a/docs/reference/migration/migrate_2_0/packaging.asciidoc
+++ /dev/null
@@ -1,84 +0,0 @@
-[[breaking_20_plugin_and_packaging_changes]]
-=== Plugin and packaging changes
-
-==== Symbolic links and paths
-
-Elasticsearch 2.0 runs with the Java security manager enabled and is much more
-restrictive about which paths it is allowed to access. Various paths can be
-configured, e.g. `path.data`, `path.scripts`, `path.repo`. A configured path
-may itself be a symbolic link, but no symlinks under that path will be
-followed.
-
-==== Running `bin/elasticsearch`
-
-The command line parameter parsing has been rewritten to deal properly with
-spaces in parameters. All config settings can still be specified on the
-command line when starting Elasticsearch, but they must appear after the
-built-in "static parameters", such as `-d` (to daemonize) and `-p` (the PID path). 
-
-For instance:
-
-[source,sh]
-----------
-bin/elasticsearch -d -p /tmp/foo.pid --http.cors.enabled=true --http.cors.allow-origin='*'
-----------
-
-For a list of static parameters, run `bin/elasticsearch -h`.
-
-==== `-f` removed
-
-The `-f` parameter, which used to indicate that Elasticsearch should be run in
-the foreground, was deprecated in 1.0 and removed in 2.0.
-
-==== `-V` for version
-
-The `-v` parameter now means `--verbose` for both `bin/elasticsearch-plugin` and
-`bin/elasticsearch` (although it has no effect on the latter). To output the
-version, use `-V` or `--version` instead.
-
-==== Plugin manager should run as root
-
-The permissions of the `config`, `bin`, and `plugins` directories in the RPM
-and deb packages have been made more restrictive. The plugin manager should
-be run as root, otherwise it will not be able to install plugins.
-
-==== Support for official plugins
-
-Almost all of the official Elasticsearch plugins have been moved to the main
-`elasticsearch` repository. They will be released at the same time as
-Elasticsearch and have the same version number as Elasticsearch.
-
-Official plugins can be installed as follows:
-
-[source,sh]
---------------
-sudo bin/elasticsearch-plugin install analysis-icu
---------------
-
-Community-provided plugins can be installed as before.
-
-==== Plugins require descriptor file
-
-All plugins are now required to have a https://github.com/elastic/elasticsearch/blob/2.0/dev-tools/src/main/resources/plugin-metadata/plugin-descriptor.properties[plugin-descriptor.properties] file. If a node has a plugin installed which lacks this file, it will be unable to start.
-
-==== Repository naming structure changes
-
-Elasticsearch 2.0 changes the way the repository URLs are referenced. Instead
-of specific repositories for both major and minor versions, the repositories will
-use a major version reference only.
-
-The URL for apt packages now uses the following structure:
-
-[source,sh]
---------------
-deb http://packages.elastic.co/elasticsearch/2.x/debian stable main
---------------
-
-And for yum packages it is:
-
-[source,sh]
---------------
-baseurl=http://packages.elastic.co/elasticsearch/2.x/centos
---------------
-
-The <> page details this change.
diff --git a/docs/reference/migration/migrate_2_0/parent_child.asciidoc b/docs/reference/migration/migrate_2_0/parent_child.asciidoc
deleted file mode 100644
index 1addf883973..00000000000
--- a/docs/reference/migration/migrate_2_0/parent_child.asciidoc
+++ /dev/null
@@ -1,43 +0,0 @@
-[[breaking_20_parent_child_changes]]
-=== Parent/Child changes
-
-Parent/child has been rewritten completely to reduce memory usage and to
-execute `has_child` and `has_parent` queries faster and more efficiently. The
-`_parent` field uses doc values by default. The refactored and improved
-implementation is only active for indices created on or after version 2.0.
-
-In order to benefit from all the performance and memory improvements, we
-recommend reindexing all existing indices that use the `_parent` field.
-
-==== Parent type cannot pre-exist
-
-A mapping type is declared as a child of another mapping type by specifying
-the `_parent` meta field:
-
-[source,js]
---------------------------
-DELETE *
-
-PUT my_index
-{
-  "mappings": {
-    "my_parent": {},
-    "my_child": {
-      "_parent": {
-        "type": "my_parent" <1>
-      }
-    }
-  }
-}
---------------------------
-<1> The `my_parent` type is the parent of the `my_child` type. 
-
-The mapping for the parent type can be added at the same time as the mapping
-for the child type, but cannot be added before the child type.
-
-==== `top_children` query removed
-
-The `top_children` query has been removed in favour of the `has_child` query.
-It wasn't always faster than the `has_child` query and the results were usually
-inaccurate. The total hits and any aggregations in the same search request
-would be incorrect if `top_children` was used.
diff --git a/docs/reference/migration/migrate_2_0/query_dsl.asciidoc b/docs/reference/migration/migrate_2_0/query_dsl.asciidoc
deleted file mode 100644
index a85ade8690b..00000000000
--- a/docs/reference/migration/migrate_2_0/query_dsl.asciidoc
+++ /dev/null
@@ -1,189 +0,0 @@
-[[breaking_20_query_dsl_changes]]
-=== Query DSL changes
-
-==== Queries and filters merged
-
-Queries and filters have been merged -- all filter clauses are now query
-clauses. Query clauses can now be used in _query context_ or in
-_filter context_:
-
-Query context::
-
-A query used in query context will calculate relevance scores and will not be
-cacheable. Query context is used whenever filter context does not apply.
-
-Filter context::
-+
---
-
-A query used in filter context will not calculate relevance scores, and will
-be cacheable. Filter context is introduced by:
-
-* the `constant_score` query
-* the `must_not` and (newly added) `filter` parameter in the `bool` query
-* the `filter` and `filters` parameters in the `function_score` query
-* any API called `filter`, such as the `post_filter` search parameter, or in
-  aggregations or index aliases
---
-
-==== `terms` query and filter
-
-The `execution` option of the `terms` filter is now deprecated and is ignored
-if provided. Similarly, the `terms` query no longer supports the
-`minimum_should_match` parameter.
-
-==== `or` and `and` now implemented via `bool`
-
-The `or` and `and` filters previously had a different execution pattern to the
-`bool` filter. It used to be important to use `and`/`or` with certain filter
-clauses, and `bool` with others.
-
-This distinction has been removed: the `bool` query is now smart enough to
-handle both cases optimally. As a result of this change, the `or` and `and`
-filters are now syntactic sugar, executed internally as a `bool` query.
-These filters may be removed in the future.
-
-==== `filtered` query and `query` filter deprecated
-
-The `query` filter is deprecated as it is no longer needed -- all queries can
-be used in query or filter context.
-
-The `filtered` query is deprecated in favour of the `bool` query. Instead of
-the following:
-
-[source,js]
-------------------------
-GET _search
-{
-  "query": {
-    "filtered": {
-      "query": {
-        "match": {
-          "text": "quick brown fox"
-        }
-      },
-      "filter": {
-        "term": {
-          "status": "published"
-        }
-      }
-    }
-  }
-}
-------------------------
-
-move the query and filter to the `must` and `filter` parameters in the `bool`
-query:
-
-[source,js]
-------------------------
-GET _search
-{
-  "query": {
-    "bool": {
-      "must": {
-        "match": {
-          "text": "quick brown fox"
-        }
-      },
-      "filter": {
-        "term": {
-          "status": "published"
-        }
-      }
-    }
-  }
-}
-------------------------
-
-==== Filter auto-caching
-
-It used to be possible to control which filters were cached with the `_cache`
-option and to provide a custom `_cache_key`. These options are deprecated
-and, if present, will be ignored.
-
-Query clauses used in filter context are now auto-cached when it makes sense
-to do so. 
The algorithm takes into account the frequency of use, the cost of -query execution, and the cost of building the filter. - -The `terms` filter lookup mechanism no longer caches the values of the -document containing the terms. It relies on the filesystem cache instead. If -the lookup index is not too large, it is recommended to replicate it to all -nodes by setting `index.auto_expand_replicas: 0-all` in order to remove the -network overhead as well. - -==== Numeric queries use IDF for scoring - -Previously, term queries on numeric fields were deliberately prevented from -using the usual Lucene scoring logic and this behaviour was undocumented and, -to some, unexpected. - -Single `term` queries on numeric fields now score in the same way as string -fields, using IDF and norms (if enabled). - -To query numeric fields without scoring, the query clause should be used in -filter context, e.g. in the `filter` parameter of the `bool` query, or wrapped -in a `constant_score` query: - -[source,js] ----------------------------- -GET _search -{ - "query": { - "bool": { - "must": [ - { - "match": { <1> - "numeric_tag": 5 - } - } - ], - "filter": [ - { - "match": { <2> - "count": 5 - } - } - ] - } - } -} ----------------------------- -<1> This clause would include IDF in the relevance score calculation. -<2> This clause would have no effect on the relevance score. - -==== Fuzziness and fuzzy-like-this - -Fuzzy matching used to calculate the score for each fuzzy alternative, meaning -that rare misspellings would have a higher score than the more common correct -spellings. Now, fuzzy matching blends the scores of all the fuzzy alternatives -to use the IDF of the most frequently occurring alternative. - -Fuzziness can no longer be specified using a percentage, but should instead -use the number of allowed edits: - -* `0`, `1`, `2`, or -* `AUTO` (which chooses `0`, `1`, or `2` based on the length of the term) - -The `fuzzy_like_this` and `fuzzy_like_this_field` queries used a very -expensive approach to fuzzy matching and have been removed. - -==== More Like This - -The More Like This (`mlt`) API and the `more_like_this_field` (`mlt_field`) -query have been removed in favor of the -<> query. - -The parameter `percent_terms_to_match` has been removed in favor of -`minimum_should_match`. - -==== `limit` filter deprecated - -The `limit` filter is deprecated and becomes a no-op. You can achieve similar -behaviour using the <> parameter. - -==== Java plugins registering custom queries - -Java plugins that register custom queries can do so by using the -`IndicesQueriesModule#addQuery(Class)` method. Other -ways to register custom queries are not supported anymore. diff --git a/docs/reference/migration/migrate_2_0/removals.asciidoc b/docs/reference/migration/migrate_2_0/removals.asciidoc deleted file mode 100644 index 31693c3d3ac..00000000000 --- a/docs/reference/migration/migrate_2_0/removals.asciidoc +++ /dev/null @@ -1,100 +0,0 @@ -[[breaking_20_removed_features]] -=== Removed features - -==== Rivers have been removed - -Elasticsearch does not support rivers anymore. While we had first planned to -keep them around to ease migration, keeping support for rivers proved to be -challenging as it conflicted with other important changes that we wanted to -bring to 2.0 like synchronous dynamic mappings updates, so we eventually -decided to remove them entirely. See -link:/blog/deprecating_rivers[Deprecating Rivers] for more background about -why we took this decision. 
-
-==== Facets have been removed
-
-Facets, deprecated since 1.0, have now been removed. Instead, use the much
-more powerful and flexible <> framework.
-This also means that Kibana 3 will not work with Elasticsearch 2.0.
-
-==== MVEL has been removed
-
-The MVEL scripting language has been removed. The default scripting language
-is now Groovy.
-
-==== Delete-by-query is now a plugin
-
-The old delete-by-query functionality was fast but unsafe. It could lead to
-document differences between the primary and replica shards, and could even
-produce out of memory exceptions and cause the cluster to crash.
-
-This feature has been reimplemented using the <> and
-<> APIs. This may be slower for queries that match
-large numbers of documents, but it is safe.
-
-Currently, a long running delete-by-query job cannot be cancelled, which is
-one of the reasons that this functionality is only available as a plugin. You
-can install the plugin with:
-
-[source,sh]
------------------
-./bin/elasticsearch-plugin install delete-by-query
------------------
-
-See {plugins}/plugins-delete-by-query.html for more information.
-
-==== Multicast Discovery is now a plugin
-
-Support for multicast is very patchy. Linux doesn’t allow multicast listening on localhost,
-while OS X sends multicast broadcasts across all interfaces regardless of the configured
-bind address. On top of that, some networks have multicast disabled by default.
-
-This feature has been moved to a plugin. The default discovery mechanism now uses
-unicast, with a default setup which looks for the first 5 ports on localhost. If you
-still need to use multicast discovery, you can install the plugin with:
-
-[source,sh]
------------------
-./bin/elasticsearch-plugin install discovery-multicast
------------------
-
-==== `_shutdown` API
-
-The `_shutdown` API has been removed without a replacement. Nodes should be
-managed via the operating system and the provided start/stop scripts.
-
-==== `murmur3` is now a plugin
-
-The `murmur3` field, which indexes hashes of the field values, has been moved
-out of core and is available as a plugin. It can be installed as:
-
-[source,sh]
------------------
-./bin/elasticsearch-plugin install mapper-murmur3
------------------
-
-==== `_size` is now a plugin
-
-The `_size` meta-data field, which indexes the size in bytes of the original
-JSON document, has been moved out of core and is available as a plugin. It
-can be installed as:
-
-[source,sh]
------------------
-./bin/elasticsearch-plugin install mapper-size
------------------
-
-==== Thrift and memcached transport
-
-The thrift and memcached transport plugins are no longer supported. Instead, use
-either the HTTP transport (enabled by default) or the node or transport Java client.
-
-==== Bulk UDP
-
-The bulk UDP API has been removed. Instead, use the standard
-<> API, or use UDP to send documents to Logstash first.
-
-==== MergeScheduler pluggability
-
-The merge scheduler is no longer pluggable.
-
diff --git a/docs/reference/migration/migrate_2_0/scripting.asciidoc b/docs/reference/migration/migrate_2_0/scripting.asciidoc
deleted file mode 100644
index 495d2daa2c5..00000000000
--- a/docs/reference/migration/migrate_2_0/scripting.asciidoc
+++ /dev/null
@@ -1,103 +0,0 @@
-[[breaking_20_scripting_changes]]
-=== Scripting changes
-
-==== Scripting syntax
-
-The syntax for scripts has been made consistent across all APIs. 
The accepted -format is as follows: - -Inline/Dynamic scripts:: -+ --- - -[source,js] ---------------- -"script": { - "inline": "doc['foo'].value + val", <1> - "lang": "groovy", <2> - "params": { "val": 3 } <3> -} ---------------- -<1> The inline script to execute. -<2> The optional language of the script. -<3> Any named parameters. --- - -Indexed scripts:: -+ --- -[source,js] ---------------- -"script": { - "id": "my_script_id", <1> - "lang": "groovy", <2> - "params": { "val": 3 } <3> -} ---------------- -<1> The ID of the indexed script. -<2> The optional language of the script. -<3> Any named parameters. --- - -File scripts:: -+ --- -[source,js] ---------------- -"script": { - "file": "my_file", <1> - "lang": "groovy", <2> - "params": { "val": 3 } <3> -} ---------------- -<1> The filename of the script, without the `.lang` suffix. -<2> The optional language of the script. -<3> Any named parameters. --- - -For example, an update request might look like this: - -[source,js] ---------------- -POST my_index/my_type/1/_update -{ - "script": { - "inline": "ctx._source.count += val", - "params": { "val": 3 } - }, - "upsert": { - "count": 0 - } -} ---------------- - -A short syntax exists for running inline scripts in the default scripting -language without any parameters: - -[source,js] ----------------- -GET _search -{ - "script_fields": { - "concat_fields": { - "script": "doc['one'].value + ' ' + doc['two'].value" - } - } -} ----------------- - -==== Scripting settings - -The `script.disable_dynamic` node setting has been replaced by fine-grained -script settings described in <>. - -==== Groovy scripts sandbox - -The Groovy sandbox and related settings have been removed. Groovy is now a -non-sandboxed scripting language, without any option to turn the sandbox on. - -==== Plugins making use of scripts - -Plugins that make use of scripts must register their own script context -through `ScriptModule`. Script contexts can be used as part of fine-grained -settings to enable/disable scripts selectively. diff --git a/docs/reference/migration/migrate_2_0/search.asciidoc b/docs/reference/migration/migrate_2_0/search.asciidoc deleted file mode 100644 index 036313077ff..00000000000 --- a/docs/reference/migration/migrate_2_0/search.asciidoc +++ /dev/null @@ -1,122 +0,0 @@ -[[breaking_20_search_changes]] -=== Search changes - -==== Partial fields - -Partial fields have been removed in favor of <>. - -==== `search_type=count` deprecated - -The `count` search type has been deprecated. All benefits from this search -type can now be achieved by using the (default) `query_then_fetch` search type -and setting `size` to `0`. - -==== The count api internally uses the search api - -The count api is now a shortcut to the search api with `size` set to 0. As a -result, a total failure will result in an exception being returned rather -than a normal response with `count` set to `0` and shard failures. - -==== All stored meta-fields returned by default - -Previously, meta-fields like `_routing`, `_timestamp`, etc would only be -included in the search results if specifically requested with the `fields` -parameter. Now, all meta-fields which have stored values will be returned by -default. Additionally, they are now returned at the top level (along with -`_index`, `_type`, and `_id`) instead of in the `fields` element. - -For instance, the following request: - -[source,sh] ---------------- -GET /my_index/_search?fields=foo ---------------- - -might return: - -[source,js] ---------------- -{ - [...] 
- "hits": { - "total": 1, - "max_score": 1, - "hits": [ - { - "_index": "my_index", - "_type": "my_type", - "_id": "1", - "_score": 1, - "_timestamp": 10000000, <1> - "fields": { - "foo" : [ "bar" ] - } - } - ] - } -} ---------------- -<1> The `_timestamp` is returned by default, and at the top level. - - -==== Script fields - -Script fields in 1.x were only returned as a single value. Even if the return -value of a script was a list, it would be returned as an array containing an -array: - -[source,js] ---------------- -"fields": { - "my_field": [ - [ - "v1", - "v2" - ] - ] -} ---------------- - -In elasticsearch 2.0, scripts that return a list of values are treated as -multivalued fields. The same example would return the following response, with -values in a single array. - -[source,js] ---------------- -"fields": { - "my_field": [ - "v1", - "v2" - ] -} ---------------- - -==== Timezone for date field - -Specifying the `time_zone` parameter in queries or aggregations on fields of -type `date` must now be either an ISO 8601 UTC offset, or a timezone id. For -example, the value `+1:00` must now be written as `+01:00`. - -==== Only highlight queried fields - -The default value for the `require_field_match` option has changed from -`false` to `true`, meaning that the highlighters will, by default, only take -the fields that were queried into account. - -This means that, when querying the `_all` field, trying to highlight on any -field other than `_all` will produce no highlighted snippets. Querying the -same fields that need to be highlighted is the cleaner solution to get -highlighted snippets back. Otherwise `require_field_match` option can be set -to `false` to ignore field names completely when highlighting. - -The postings highlighter doesn't support the `require_field_match` option -anymore, it will only highlight fields that were queried. - -==== Postings highlighter doesn't support `match_phrase_prefix` - -The `match` query with type set to `phrase_prefix` (or the -`match_phrase_prefix` query) is not supported by the postings highlighter. No -highlighted snippets will be returned. - - - diff --git a/docs/reference/migration/migrate_2_0/settings.asciidoc b/docs/reference/migration/migrate_2_0/settings.asciidoc deleted file mode 100644 index 06aa743a5d8..00000000000 --- a/docs/reference/migration/migrate_2_0/settings.asciidoc +++ /dev/null @@ -1,204 +0,0 @@ -[[breaking_20_setting_changes]] -=== Setting changes - -==== Command line flags - -Command line flags using single dash notation must be now specified as the first arguments. -For example if previously using: - -[source,sh] ---------------- -./elasticsearch --node.name=test_node -Des.path.conf=/opt/elasticsearch/conf/test_node ---------------- - -This will now need to be changed to: - -[source,sh] ---------------- -./elasticsearch -Des.path.conf=/opt/elasticsearch/conf/test_node --node.name=test_node ---------------- - -for the flag to take effect. - -[[migration-script-settings]] -==== Scripting settings - -The `script.disable_dynamic` node setting has been replaced by fine-grained -script settings described in the <>. 
-The following setting was previously used to enable dynamic or inline scripts:
-
-[source,yaml]
---------------
-script.disable_dynamic: false
---------------
-
-It should be replaced with the following two settings in `elasticsearch.yml` that
-achieve the same result:
-
-[source,yaml]
---------------
-script.inline: true
-script.indexed: true
---------------
-
-==== Units required for time and byte-sized settings
-
-Any settings which accept time or byte values must now be specified with
-units. For instance, it is too easy to set the `refresh_interval` to 1
-*millisecond* instead of 1 second:
-
-[source,js]
---------------
-PUT _settings
-{
-  "index.refresh_interval": 1
-}
---------------
-
-In 2.0, the above request will throw an exception. Instead, the refresh
-interval should be set to `"1s"` for one second.
-
-==== Merge and merge throttling settings
-
-The tiered merge policy is now the only supported merge policy. These settings
-have been removed:
-
-* `index.merge.policy.type`
-* `index.merge.policy.min_merge_size`
-* `index.merge.policy.max_merge_size`
-* `index.merge.policy.merge_factor`
-* `index.merge.policy.calibrate_size_by_deletes`
-* `index.merge.policy.min_merge_docs`
-* `index.merge.policy.max_merge_docs`
-
-Merge throttling now uses a feedback loop to auto-throttle. These settings
-have been removed:
-
-* `indices.store.throttle.type`
-* `indices.store.throttle.max_bytes_per_sec`
-* `index.store.throttle.type`
-* `index.store.throttle.max_bytes_per_sec`
-
-==== Shadow replica settings
-
-The `node.enable_custom_paths` setting has been removed and replaced by the
-`path.shared_data` setting to allow shadow replicas with custom paths to work
-with the security manager. For example, if your previous configuration had:
-
-[source,yaml]
-------
-node.enable_custom_paths: true
-------
-
-And you created an index using shadow replicas with `index.data_path` set to
-`/opt/data/my_index` with the following:
-
-[source,js]
---------------------------------------------------
-PUT /my_index
-{
-  "index": {
-    "number_of_shards": 1,
-    "number_of_replicas": 4,
-    "data_path": "/opt/data/my_index",
-    "shadow_replicas": true
-  }
-}
---------------------------------------------------
-
-For 2.0, you will need to set `path.shared_data` to a parent directory of the
-index's `data_path`, so:
-
-[source,yaml]
-----------
-path.shared_data: /opt/data
-----------
-
-==== Resource watcher settings renamed
-
-The setting names for configuring the resource watcher have been renamed
-to prevent clashes with the watcher plugin:
-
-* `watcher.enabled` is now `resource.reload.enabled`
-* `watcher.interval` is now `resource.reload.interval`
-* `watcher.interval.low` is now `resource.reload.interval.low`
-* `watcher.interval.medium` is now `resource.reload.interval.medium`
-* `watcher.interval.high` is now `resource.reload.interval.high`
-
-==== index.gateway setting renamed
-
-* `index.gateway.local.sync` is now `index.translog.sync_interval`
-
-==== Hunspell dictionary configuration
-
-The parameter `indices.analysis.hunspell.dictionary.location` has been
-removed, and the `hunspell` directory inside the config directory is always used.
-
-==== CORS allowed origins
-
-The CORS allowed origins setting, `http.cors.allow-origin`, no longer has a default value. Previously, the default value
-was `*`, which would allow CORS requests from any origin and is considered insecure. 
The `http.cors.allow-origin` setting
-should be specified with only the origins that should be allowed, like so:
-
-[source,yaml]
---------------
-http.cors.allow-origin: /https?:\/\/localhost(:[0-9]+)?/
---------------
-
-==== JSONP support
-
-JSONP callback support has now been removed. CORS should be used to access Elasticsearch
-over AJAX instead:
-
-[source,yaml]
---------------
-http.cors.enabled: true
-http.cors.allow-origin: /https?:\/\/localhost(:[0-9]+)?/
---------------
-
-==== In memory indices
-
-The `memory` / `ram` store (`index.store.type`) option has been removed from
-Elasticsearch. In-memory indices are no longer supported.
-
-==== Log messages truncated
-
-Log messages are now truncated at 10,000 characters. This can be changed in
-the `logging.yml` configuration file with the `file.layout.conversionPattern`
-setting.
-
-==== Custom config file
-
-It is no longer possible to specify a custom config file with the `CONF_FILE`
-environment variable, or the `-Des.config`, `-Des.default.config`, or
-`-Delasticsearch.config` parameters.
-
-Instead, the config file must be named `elasticsearch.yml` and must be located
-in the default `config/` directory, unless a custom config directory is specified.
-
-The location of a custom config directory may be specified as follows:
-
-[source,sh]
--------------
-./bin/elasticsearch --path.conf=/path/to/conf/dir
-./bin/elasticsearch-plugin -Des.path.conf=/path/to/conf/dir install analysis-icu
--------------
-
-When using the RPM or debian packages, the plugin script and the
-init/service scripts will consult the `CONF_DIR` environment variable
-to check for a custom config location. The value of the `CONF_DIR`
-variable can be set in the environment config file which is located either in
-`/etc/default/elasticsearch` or `/etc/sysconfig/elasticsearch`.
-
-==== Custom analysis file paths
-
-It is no longer possible to set a custom file path outside `CONF_DIR` for `*_path` settings
-in <> or <> filters.
-You must specify either a path relative to `CONF_DIR` or an absolute path inside `CONF_DIR`.
-
-==== `ES_CLASSPATH` removed
-
-The `ES_CLASSPATH` environment variable is no longer used to set the class
-path. External libraries should preferably be loaded using the plugin
-mechanism or, if you really must, be copied to the `lib/` directory.
diff --git a/docs/reference/migration/migrate_2_0/snapshot_restore.asciidoc b/docs/reference/migration/migrate_2_0/snapshot_restore.asciidoc
deleted file mode 100644
index c9b222abdc8..00000000000
--- a/docs/reference/migration/migrate_2_0/snapshot_restore.asciidoc
+++ /dev/null
@@ -1,38 +0,0 @@
-[[breaking_20_snapshot_and_restore_changes]]
-=== Snapshot and Restore changes
-
-==== File-system repositories must be whitelisted
-
-Locations of the shared file system repositories and the URL repositories with
-`file:` URLs now have to be registered before starting Elasticsearch using the
-`path.repo` setting. The `path.repo` setting can contain one or more
-repository locations:
-
-[source,yaml]
---------------
-path.repo: ["/mnt/daily", "/mnt/weekly"]
---------------
-
-If the repository location is specified as an absolute path it has to start
-with one of the locations specified in `path.repo`. If the location is
-specified as a relative path, it will be resolved against the first location
-specified in the `path.repo` setting. 
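-
-For instance, once `/mnt/daily` has been whitelisted as above, a shared file
-system repository under it might be registered like this (a sketch; the
-repository name `my_backup` is hypothetical):
-
-[source,js]
---------------
-PUT _snapshot/my_backup
-{
-  "type": "fs",
-  "settings": {
-    "location": "/mnt/daily/my_backup" <1>
-  }
-}
---------------
-<1> The location starts with one of the whitelisted `path.repo` entries, so
-registration is allowed.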
-
-==== URL repositories must be whitelisted
-
-URL repositories with `http:`, `https:`, and `ftp:` URLs have to be
-whitelisted before starting Elasticsearch with the
-`repositories.url.allowed_urls` setting. This setting supports wildcards in
-the place of host, path, query, and fragment. For example:
-
-[source,yaml]
------------------------------------
-repositories.url.allowed_urls: ["http://www.example.org/root/*", "https://*.mydomain.com/*?*#*"]
------------------------------------
-
-==== Wildcard expansion
-
-The obsolete parameters `expand_wildcards_open` and `expand_wildcards_close`
-are no longer supported by the snapshot and restore operations. These
-parameters have been replaced by a single `expand_wildcards` parameter. See
-<> for more.
diff --git a/docs/reference/migration/migrate_2_0/stats.asciidoc b/docs/reference/migration/migrate_2_0/stats.asciidoc
deleted file mode 100644
index dc80ecd83ec..00000000000
--- a/docs/reference/migration/migrate_2_0/stats.asciidoc
+++ /dev/null
@@ -1,52 +0,0 @@
-[[breaking_20_stats_info_and_literal_cat_literal_changes]]
-=== Stats, info, and `cat` changes
-
-==== Sigar removed
-
-We no longer ship the Sigar library for operating system dependent statistics,
-as it no longer seems to be maintained. Instead, we rely on the statistics
-provided by the JVM. This has resulted in a number of changes to the node
-info, and node stats responses:
-
-* `network.*` has been removed from nodes info and nodes stats.
-* `fs.*.dev` and `fs.*.disk*` have been removed from nodes stats.
-* `os.*` has been removed from nodes stats, except for `os.timestamp`,
-  `os.load_average`, `os.mem.*`, and `os.swap.*`.
-* `os.mem.total` and `os.swap.total` have been removed from nodes info.
-* `process.mem.resident` and `process.mem.share` have been removed from node stats.
-
-==== Removed `id_cache` from stats apis
-
-Removed the `id_cache` metric from the nodes stats, indices stats and cluster stats
-apis. This metric has also been removed from the shards cat, indices cat and
-nodes cat apis. Parent/child memory is now reported under fielddata, because
-it has internally been using fielddata for a while now.
-
-To see just how much memory parent/child related field data is taking, the
-`fielddata_fields` option can be used on the stats apis. Indices stats
-example:
-
-[source,js]
---------------------------------------------------
-GET /_stats/fielddata?fielddata_fields=_parent
---------------------------------------------------
-
-==== Percolator stats
-
-The total time spent running percolator queries is now called `percolate.time`
-instead of `percolate.get_time`.
-
-==== Cluster state REST API
-
-The cluster state API doesn't return the `routing_nodes` section anymore when
-`routing_table` is requested. The newly introduced `routing_nodes` flag can be
-used separately to control whether `routing_nodes` should be returned.
-
-==== Index status API
-
-The deprecated index status API has been removed.
-
-==== Nodes Stats API
-
-Queue lengths are now reported as basic numbers so they can easily be processed by code. Previously we used a
-human-readable format. For example, a queue with 1,000 items is now reported as `1000` instead of `1k`. 
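-
-For example, the numeric queue lengths can be inspected via the thread pool
-section of the nodes stats (a sketch):
-
-[source,js]
---------------
-GET /_nodes/stats/thread_pool
---------------
-
-where a busy queue is now reported as `"queue": 1000` rather than `"queue": "1k"`.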
diff --git a/docs/reference/migration/migrate_2_0/striping.asciidoc b/docs/reference/migration/migrate_2_0/striping.asciidoc
deleted file mode 100644
index 2e80f29c774..00000000000
--- a/docs/reference/migration/migrate_2_0/striping.asciidoc
+++ /dev/null
@@ -1,21 +0,0 @@
-[[breaking_20_multiple_literal_data_path_literal_striping]]
-=== Multiple `path.data` striping
-
-Previously, if the `path.data` setting listed multiple data paths, then a
-shard would be ``striped'' across all paths by writing a whole file to each
-path in turn (in accordance with the `index.store.distributor` setting). The
-result was that files from a single segment in a shard could be spread across
-multiple disks, and the failure of any one disk could corrupt multiple shards.
-
-This striping is no longer supported. Instead, different shards may be
-allocated to different paths, but all of the files in a single shard will be
-written to the same path.
-
-If striping is detected while starting Elasticsearch 2.0.0 or later, *all of
-the files belonging to the same shard will be migrated to the same path*. If
-there is not enough disk space to complete this migration, the upgrade will be
-cancelled and can only be resumed once enough disk space is made available.
-
-The `index.store.distributor` setting has also been removed.
-
-
diff --git a/docs/reference/migration/migrate_2_1.asciidoc b/docs/reference/migration/migrate_2_1.asciidoc
deleted file mode 100644
index 454a57f96bc..00000000000
--- a/docs/reference/migration/migrate_2_1.asciidoc
+++ /dev/null
@@ -1,87 +0,0 @@
-[[breaking-changes-2.1]]
-== Breaking changes in 2.1
-
-This section discusses the changes that you need to be aware of when migrating
-your application to Elasticsearch 2.1.
-
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-
-[[breaking_21_search_changes]]
-=== Search changes
-
-==== `search_type=scan` deprecated
-
-The `scan` search type has been deprecated. All benefits from this search
-type can now be achieved by doing a scroll request that sorts documents in
-`_doc` order, for instance:
-
-[source,sh]
---------------
-GET /my_index/_search?scroll=2m
-{
-  "sort": [
-    "_doc"
-  ]
-}
---------------
-
-Scroll requests sorted by `_doc` have been optimized to more efficiently resume
-from where the previous request stopped, so this will have the same performance
-characteristics as the former `scan` search type.
-
-==== from + size limits
-
-Elasticsearch will now return an error message if a query's `from` + `size` is
-more than the `index.max_result_window` parameter. This parameter defaults to
-10,000 which is safe for almost all clusters. Higher values can consume
-significant chunks of heap memory per search and per shard executing the
-search. It's safest to leave this value as it is and use the scroll api for any
-deep scrolling, but this setting is dynamic so it can be raised or lowered as
-needed.
-
-[[breaking_21_update_changes]]
-=== Update changes
-
-==== Updates now `detect_noop` by default
-
-We've switched the default value of the `detect_noop` option from `false` to
-`true`. This means that Elasticsearch will ignore updates that don't change the
-source unless you explicitly set `"detect_noop": false`. `detect_noop` was
-always computationally cheap compared to the expense of the update, which can be
-thought of as a delete operation followed by an index operation.
-
-[[breaking_21_removed_features]]
-=== Removed features
-
-==== `indices.fielddata.cache.expire`
-
-The experimental feature `indices.fielddata.cache.expire` has been removed. 
-For indices that have this setting configured, this config will be ignored.
-
-[[breaking_21_more_like_this]]
-=== More Like This
-
-The `MoreLikeThisQueryBuilder#ignoreLike` methods have been deprecated in favor
-of using the `unlike` methods.
-
-`MoreLikeThisBuilder#addItem` has been deprecated in favor of using
-`MoreLikeThisBuilder#addLikeItem`.
-
-[[breaking_21_nested_sorting]]
-=== Nested sorting
-
-If sorting on a field inside a nested object, the `nested_path` should be specified.
-Previously there was an attempt to resolve the nested path automatically, but it was sometimes incorrect.
-To avoid confusion, the `nested_path` should always be specified.
-
-[[breaking_21_index_apis]]
-=== Index APIs
-
-==== Optimize API
-
-The Optimize API has been deprecated; all new optimize actions should use the new Force Merge API.
diff --git a/docs/reference/migration/migrate_2_2.asciidoc b/docs/reference/migration/migrate_2_2.asciidoc
deleted file mode 100644
index 39c059e7f47..00000000000
--- a/docs/reference/migration/migrate_2_2.asciidoc
+++ /dev/null
@@ -1,55 +0,0 @@
-[[breaking-changes-2.2]]
-== Breaking changes in 2.2
-
-This section discusses the changes that you need to be aware of when migrating
-your application to Elasticsearch 2.2.
-
-[float]
-=== Scripting and security
-
-The Java Security Manager is being used to lock down the privileges available
-to the scripting languages and to restrict the classes they are allowed to
-load to a predefined whitelist. These changes may cause scripts which worked
-in earlier versions to fail. See <> for more
-details.
-
-[float]
-=== Field stats API
-
-The field stats response format has been changed for number-based and date
-fields. The `min_value` and `max_value` elements now return values as numbers,
-and the new `min_value_as_string` and `max_value_as_string` return the values
-as strings.
-
-[float]
-=== Default logging using systemd
-
-In previous versions of Elasticsearch using systemd, the default logging
-configuration routed standard output to `/dev/null` and standard error to
-the journal. However, there are often critical error messages at
-startup that are logged to standard output rather than standard error
-and these error messages would be lost to the ether. The default has
-changed to now route standard output to the journal and standard error
-to inherit this setting (these are the defaults for systemd). These
-settings can be modified by editing the `elasticsearch.service` file.
-
-[float]
-=== Cloud AWS Plugin
-
-Proxy settings have been deprecated and renamed:
-
-* from `cloud.aws.proxy_host` to `cloud.aws.proxy.host`
-* from `cloud.aws.ec2.proxy_host` to `cloud.aws.ec2.proxy.host`
-* from `cloud.aws.s3.proxy_host` to `cloud.aws.s3.proxy.host`
-* from `cloud.aws.proxy_port` to `cloud.aws.proxy.port`
-* from `cloud.aws.ec2.proxy_port` to `cloud.aws.ec2.proxy.port`
-* from `cloud.aws.s3.proxy_port` to `cloud.aws.s3.proxy.port`
-
-If you are using proxy settings, update your settings, as the deprecated ones will
-be removed in the next major version.
-
-[float]
-=== Multicast plugin deprecated
-
-The `discovery-multicast` plugin has been deprecated in 2.2.0 and has
-been removed in 3.0.0. 
diff --git a/docs/reference/migration/migrate_2_3.asciidoc b/docs/reference/migration/migrate_2_3.asciidoc deleted file mode 100644 index 0d741e2adb2..00000000000 --- a/docs/reference/migration/migrate_2_3.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -[[breaking-changes-2.3]] -== Breaking changes in 2.3 - -This section discusses the changes that you need to be aware of when migrating -your application to Elasticsearch 2.3. - -* <> - -[[breaking_23_index_apis]] -=== Mappings - -==== Limit to the number of `nested` fields - -Indexing a document with 100 nested fields actually indexes 101 documents as each nested -document is indexed as a separate document. To safeguard against ill-defined mappings -the number of nested fields that can be defined per index has been limited to 50. -This default limit can be changed with the index setting `index.mapping.nested_fields.limit`. -Note that the limit is only checked when new indices are created or mappings are updated. It -will thus only affect existing pre-2.3 indices if their mapping is changed. diff --git a/docs/reference/migration/migrate_5_0.asciidoc b/docs/reference/migration/migrate_5_0.asciidoc index 6fcf566fdcb..f4aaacee35e 100644 --- a/docs/reference/migration/migrate_5_0.asciidoc +++ b/docs/reference/migration/migrate_5_0.asciidoc @@ -4,843 +4,63 @@ This section discusses the changes that you need to be aware of when migrating your application to Elasticsearch 5.0. +[float] +=== Indices created before 5.0 + +Elasticsearch 5.0 can read indices created in version 2.0 and above. If any +of your indices were created before 2.0 you will need to upgrade to the +latest 2.x version of Elasticsearch first, in order to upgrade your indices or +to delete the old indices. Elasticsearch will not start in the presence of old +indices. To upgrade 2.x indices, first start a node which have access to all +the data folders and let it upgrade all the indices before starting up rest of +the cluster. + +[IMPORTANT] +.Reindex indices from Elasticseach 1.x or before +========================================= + +Indices created in Elasticsearch 1.x or before will need to be reindexed with +Elasticsearch 2.x in order to be readable by Elasticsearch 5.x. The easiest +way to do this is to upgrade to Elasticsearch 2.3 or later and to use the +`reindex` API. + +========================================= + +[float] +=== Also see: + * <> +* <> +* <> +* <> +* <> +* <> * <> * <> -* <> -* <> -* <> -* <> * <> -* <> -* <> -* <> -* <> -* <> * <> -* <> -* <> -* <> +* <> -[[breaking_50_search_changes]] -=== Warmers +include::migrate_5_0/search.asciidoc[] -Thanks to several changes like doc values by default or disk-based norms, -warmers have become quite useless. As a consequence, warmers and the warmer -API have been removed: it is not possible anymore to register queries that -will run before a new IndexSearcher is published. +include::migrate_5_0/mapping.asciidoc[] -Don't worry if you have warmers defined on your indices, they will simply be -ignored when upgrading to 5.0. +include::migrate_5_0/percolator.asciidoc[] -=== Search changes +include::migrate_5_0/index-apis.asciidoc[] -==== `search_type=count` removed +include::migrate_5_0/settings.asciidoc[] -The `count` search type was deprecated since version 2.0.0 and is now removed. -In order to get the same benefits, you just need to set the value of the `size` -parameter to `0`. 
+include::migrate_5_0/allocation.asciidoc[] -For instance, the following request: +include::migrate_5_0/rest.asciidoc[] -[source,sh] ---------------- -GET /my_index/_search?search_type=count -{ - "aggs": { - "my_terms": { - "terms": { - "field": "foo" - } - } - } -} ---------------- +include::migrate_5_0/cat.asciidoc[] -can be replaced with: +include::migrate_5_0/java.asciidoc[] -[source,sh] ---------------- -GET /my_index/_search -{ - "size": 0, - "aggs": { - "my_terms": { - "terms": { - "field": "foo" - } - } - } -} ---------------- +include::migrate_5_0/packaging.asciidoc[] -==== `search_type=scan` removed +include::migrate_5_0/plugins.asciidoc[] -The `scan` search type was deprecated since version 2.1.0 and is now removed. -All benefits from this search type can now be achieved by doing a scroll -request that sorts documents in `_doc` order, for instance: -[source,sh] ---------------- -GET /my_index/_search?scroll=2m -{ - "sort": [ - "_doc" - ] -} ---------------- - -Scroll requests sorted by `_doc` have been optimized to more efficiently resume -from where the previous request stopped, so this will have the same performance -characteristics as the former `scan` search type. - -==== Boost accuracy for queries on `_all` - -Per-field boosts on the `_all` are now compressed on a single byte instead of -4 bytes previously. While this will make the index more space-efficient, this -also means that the boosts will be less accurately encoded. - -[[breaking_50_rest_api_changes]] -=== REST API changes - -==== id values longer than 512 bytes are rejected - -When specifying an `_id` value longer than 512 bytes, the request will be -rejected. - -==== search exists api removed - -The search exists api has been removed in favour of using the search api with -`size` set to `0` and `terminate_after` set to `1`. - -==== `/_optimize` endpoint removed - -The deprecated `/_optimize` endpoint has been removed. The `/_forcemerge` -endpoint should be used in lieu of optimize. - -The `GET` HTTP verb for `/_forcemerge` is no longer supported, please use the -`POST` HTTP verb. - -==== Deprecated queries removed - -The following deprecated queries have been removed: - -* `filtered`: use `bool` query instead, which supports `filter` clauses too -* `and`: use `must` clauses in a `bool` query instead -* `or`: use should clauses in a `bool` query instead -* `limit`: use `terminate_after` parameter instead -* `fquery`: obsolete after filters and queries have been merged -* `query`: obsolete after filters and queries have been merged - -==== Unified fuzziness parameter - -* Removed support for the deprecated `min_similarity` parameter in `fuzzy query`, in favour of `similarity`. -* Removed support for the deprecated `fuzzy_min_sim` parameter in `query_string` query, in favour of `similarity`. -* Removed support for the deprecated `edit_distance` parameter in completion suggester, in favour of `similarity`. - -==== indices query - -Removed support for the deprecated `filter` and `no_match_filter` fields in `indices` query, -in favour of `query` and `no_match_query`. - -==== nested query - -Removed support for the deprecated `filter` fields in `nested` query, in favour of `query`. - -==== terms query - -Removed support for the deprecated `minimum_should_match` and `disable_coord` in `terms` query, use `bool` query instead. -Removed also support for the deprecated `execution` parameter. - -==== function_score query - -Removed support for the top level `filter` element in `function_score` query, replaced by `query`. 
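-
-For example, a `function_score` query that previously used a top level `filter`
-element would now express it as a `query` (a minimal sketch; the field and
-function are illustrative):
-
-[source,js]
---------------
-GET _search
-{
-  "query": {
-    "function_score": {
-      "query": {
-        "term": {
-          "status": "published"
-        }
-      },
-      "random_score": {}
-    }
-  }
-}
---------------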
- -==== highlighters - -Removed support for multiple highlighter names, the only supported ones are: `plain`, `fvh` and `postings`. - -==== top level filter - -Removed support for the deprecated top level `filter` in the search api, replaced by `post_filter`. - -==== `query_binary` and `filter_binary` removed - -Removed support for the undocumented `query_binary` and `filter_binary` sections of a search request. - -==== `span_near`'s' `collect_payloads` deprecated - -Payloads are now loaded when needed. - -[[breaking_50_cat_api]] -=== CAT API changes - -==== Use Accept header for specifying response media type - -Previous versions of Elasticsearch accepted the Content-type header -field for controlling the media type of the response in the cat API. -This is in opposition to the HTTP spec which specifies the Accept -header field for this purpose. Elasticsearch now uses the Accept header -field and support for using the Content-Type header field for this -purpose has been removed. - -==== Host field removed from the cat nodes API - -The `host` field has been removed from the cat nodes API as its value -is always equal to the `ip` field. The `name` field is available in the -cat nodes API and should be used instead of the `host` field. - -[[breaking_50_parent_child_changes]] -=== Parent/Child changes - -The `children` aggregation, parent child inner hits and `has_child` and `has_parent` queries will not work on indices -with `_parent` field mapping created before version `2.0.0`. The data of these indices need to be re-indexed into a new index. - -The format of the join between parent and child documents have changed with the `2.0.0` release. The old -format can't read from version `5.0.0` and onwards. The new format allows for a much more efficient and -scalable join between parent and child documents and the join data structures are stored on disk -data structures as opposed as before the join data structures were stored in the jvm heap space. - -==== `score_type` has been removed - -The `score_type` option has been removed from the `has_child` and `has_parent` queries in favour of the `score_mode` option -which does the exact same thing. - -==== `sum` score mode removed - -The `sum` score mode has been removed in favour of the `total` mode which does the same and is already available in -previous versions. - -==== `max_children` option - -When `max_children` was set to `0` on the `has_child` query then there was no upper limit on how many children documents -are allowed to match. This has changed and `0` now really means to zero child documents are allowed. If no upper limit -is needed then the `max_children` option shouldn't be defined at all on the `has_child` query. - -==== `_parent` field no longer indexed - -The join between parent and child documents no longer relies on indexed fields and therefor from `5.0.0` onwards -the `_parent` indexed field won't be indexed. In order to find documents that referrer to a specific parent id -the new `parent_id` query can be used. The get response and hits inside the search response remain to include -the parent id under the `_parent` key. - -[[breaking_50_settings_changes]] -=== Settings changes - -From Elasticsearch 5.0 on all settings are validated before they are applied. Node level and default index -level settings are validated on node startup, dynamic cluster and index setting are validated before they are updated/added -to the cluster state. 
Every setting must be a _known_ setting or in other words all settings must be registered with the -node or transport client they are used with. This implies that plugins that define custom settings must register all of their -settings during pluging loading using the `SettingsModule#registerSettings(Setting)` method. - -==== Node settings - -The `name` setting has been removed and is replaced by `node.name`. Usage of `-Dname=some_node_name` is not supported -anymore. - -==== Transport Settings - -All settings with a `netty` infix have been replaced by their already existing `transport` synonyms. For instance `transport.netty.bind_host` is -no longer supported and should be replaced by the superseding setting `transport.bind_host`. - -==== Analysis settings - -The `index.analysis.analyzer.default_index` analyzer is not supported anymore. -If you wish to change the analyzer to use for indexing, change the -`index.analysis.analyzer.default` analyzer instead. - -==== Ping timeout settings - -Previously, there were three settings for the ping timeout: `discovery.zen.initial_ping_timeout`, -`discovery.zen.ping.timeout` and `discovery.zen.ping_timeout`. The former two have been removed and -the only setting key for the ping timeout is now `discovery.zen.ping_timeout`. The default value for -ping timeouts remains at three seconds. - -==== Recovery settings - -Recovery settings deprecated in 1.x have been removed: - - * `index.shard.recovery.translog_size` is superseded by `indices.recovery.translog_size` - * `index.shard.recovery.translog_ops` is superseded by `indices.recovery.translog_ops` - * `index.shard.recovery.file_chunk_size` is superseded by `indices.recovery.file_chunk_size` - * `index.shard.recovery.concurrent_streams` is superseded by `indices.recovery.concurrent_streams` - * `index.shard.recovery.concurrent_small_file_streams` is superseded by `indices.recovery.concurrent_small_file_streams` - * `indices.recovery.max_size_per_sec` is superseded by `indices.recovery.max_bytes_per_sec` - -If you are using any of these settings please take the time and review their purpose. All of the settings above are considered -_expert settings_ and should only be used if absolutely necessary. If you have set any of the above setting as persistent -cluster settings please use the settings update API and set their superseded keys accordingly. - -The following settings have been removed without replacement - - * `indices.recovery.concurrent_small_file_streams` - recoveries are now single threaded. The number of concurrent outgoing recoveries are throttled via allocation deciders - * `indices.recovery.concurrent_file_streams` - recoveries are now single threaded. The number of concurrent outgoing recoveries are throttled via allocation deciders - -==== Translog settings - -The `index.translog.flush_threshold_ops` setting is not supported anymore. In order to control flushes based on the transaction log -growth use `index.translog.flush_threshold_size` instead. Changing the translog type with `index.translog.fs.type` is not supported -anymore, the `buffered` implementation is now the only available option and uses a fixed `8kb` buffer. - -The translog by default is fsynced on a request basis such that the ability to fsync on every operation is not necessary anymore. In-fact it can -be a performance bottleneck and it's trappy since it enabled by a special value set on `index.translog.sync_interval`. 
`index.translog.sync_interval` -now doesn't accept a value less than `100ms` which prevents fsyncing too often if async durability is enabled. The special value `0` is not supported anymore. - -==== Request Cache Settings - -The deprecated settings `index.cache.query.enable` and `indices.cache.query.size` have been removed and are replaced with -`index.requests.cache.enable` and `indices.requests.cache.size` respectively. - -`indices.requests.cache.clean_interval` has been replaced with `indices.cache.clean_interval` and is no longer supported. - -==== Field Data Cache Settings - -`indices.fielddata.cache.clean_interval` has been replaced with `indices.cache.clean_interval` and is no longer supported. - -==== Allocation settings - -Allocation settings deprecated in 1.x have been removed: - - * `cluster.routing.allocation.concurrent_recoveries` is superseded by `cluster.routing.allocation.node_concurrent_recoveries` - -Please change the setting in your configuration files or in the clusterstate to use the new settings instead. - -==== Similarity settings - -The 'default' similarity has been renamed to 'classic'. - -==== Indexing settings - -`indices.memory.min_shard_index_buffer_size` and `indices.memory.max_shard_index_buffer_size` are removed since Elasticsearch now allows any one shard to any -amount of heap as long as the total indexing buffer heap used across all shards is below the node's `indices.memory.index_buffer_size` (default: 10% of the JVM heap) - -==== Removed es.max-open-files - -Setting the system property es.max-open-files to true to get -Elasticsearch to print the number of maximum open files for the -Elasticsearch process has been removed. This same information can be -obtained from the <> API, and a warning is logged -on startup if it is set too low. - -==== Removed es.netty.gathering - -Disabling Netty from using NIO gathering could be done via the escape -hatch of setting the system property "es.netty.gathering" to "false". -Time has proven enabling gathering by default is a non-issue and this -non-documented setting has been removed. - -==== Removed es.useLinkedTransferQueue - -The system property `es.useLinkedTransferQueue` could be used to -control the queue implementation used in the cluster service and the -handling of ping responses during discovery. This was an undocumented -setting and has been removed. - -[[breaking_50_mapping_changes]] -=== Mapping changes - -==== Default doc values settings - -Doc values are now also on by default on numeric and boolean fields that are -not indexed. - -==== Transform removed - -The `transform` feature from mappings has been removed. It made issues very hard to debug. - -==== Default number mappings - -When a floating-point number is encountered, it is now dynamically mapped as a -float by default instead of a double. The reasoning is that floats should be -more than enough for most cases but would decrease storage requirements -significantly. - -==== `index` property - -On all types but `string`, the `index` property now only accepts `true`/`false` -instead of `not_analyzed`/`no`. The `string` field still accepts -`analyzed`/`not_analyzed`/`no`. - -==== ++_source++'s `format` option - -The `_source` mapping does not support the `format` option anymore. This option -will still be accepted for indices created before the upgrade to 5.0 for backward -compatibility, but it will have no effect. Indices created on or after 5.0 will -reject this option. 
- -==== Object notation - -Core types don't support the object notation anymore, which allowed to provide -values as follows: - -[source,json] ---------------- -{ - "value": "field_value", - "boost": 42 -} ---------------- - -==== `fielddata.format` - -Setting `fielddata.format: doc_values` in the mappings used to implicitly -enable doc values on a field. This no longer works: the only way to enable or -disable doc values is by using the `doc_values` property of mappings. - - -[[breaking_50_plugins]] -=== Plugin changes - -The command `bin/plugin` has been renamed to `bin/elasticsearch-plugin`. -The structure of the plugin has changed. All the plugin files must be contained in a directory called `elasticsearch`. -If you use the gradle build, this structure is automatically generated. - -==== Site plugins removed - -Site plugins have been removed. It is recommended to migrate site plugins to Kibana plugins. - -==== Multicast plugin removed - -Multicast has been removed. Use unicast discovery, or one of the cloud discovery plugins. - -==== Plugins with custom query implementations - -Plugins implementing custom queries need to implement the `fromXContent(QueryParseContext)` method in their -`QueryParser` subclass rather than `parse`. This method will take care of parsing the query from `XContent` format -into an intermediate query representation that can be streamed between the nodes in binary format, effectively the -query object used in the java api. Also, the query parser needs to implement the `getBuilderPrototype` method that -returns a prototype of the `NamedWriteable` query, which allows to deserialize an incoming query by calling -`readFrom(StreamInput)` against it, which will create a new object, see usages of `Writeable`. The `QueryParser` -also needs to declare the generic type of the query that it supports and it's able to parse. -The query object can then transform itself into a lucene query through the new `toQuery(QueryShardContext)` method, -which returns a lucene query to be executed on the data node. - -Similarly, plugins implementing custom score functions need to implement the `fromXContent(QueryParseContext)` -method in their `ScoreFunctionParser` subclass rather than `parse`. This method will take care of parsing -the function from `XContent` format into an intermediate function representation that can be streamed between -the nodes in binary format, effectively the function object used in the java api. Also, the query parser needs -to implement the `getBuilderPrototype` method that returns a prototype of the `NamedWriteable` function, which -allows to deserialize an incoming function by calling `readFrom(StreamInput)` against it, which will create a -new object, see usages of `Writeable`. The `ScoreFunctionParser` also needs to declare the generic type of the -function that it supports and it's able to parse. The function object can then transform itself into a lucene -function through the new `toFunction(QueryShardContext)` method, which returns a lucene function to be executed -on the data node. 
- -==== Cloud AWS plugin changes - -Cloud AWS plugin has been split in two plugins: - -* {plugins}/discovery-ec2.html[Discovery EC2 plugin] -* {plugins}/repository-s3.html[Repository S3 plugin] - -Proxy settings for both plugins have been renamed: - -* from `cloud.aws.proxy_host` to `cloud.aws.proxy.host` -* from `cloud.aws.ec2.proxy_host` to `cloud.aws.ec2.proxy.host` -* from `cloud.aws.s3.proxy_host` to `cloud.aws.s3.proxy.host` -* from `cloud.aws.proxy_port` to `cloud.aws.proxy.port` -* from `cloud.aws.ec2.proxy_port` to `cloud.aws.ec2.proxy.port` -* from `cloud.aws.s3.proxy_port` to `cloud.aws.s3.proxy.port` - -==== Cloud Azure plugin changes - -Cloud Azure plugin has been split in three plugins: - -* {plugins}/discovery-azure.html[Discovery Azure plugin] -* {plugins}/repository-azure.html[Repository Azure plugin] -* {plugins}/store-smb.html[Store SMB plugin] - -If you were using the `cloud-azure` plugin for snapshot and restore, you had in `elasticsearch.yml`: - -[source,yaml] ------ -cloud: - azure: - storage: - account: your_azure_storage_account - key: your_azure_storage_key ------ - -You need to give a unique id to the storage details now as you can define multiple storage accounts: - -[source,yaml] ------ -cloud: - azure: - storage: - my_account: - account: your_azure_storage_account - key: your_azure_storage_key ------ - - -==== Cloud GCE plugin changes - -Cloud GCE plugin has been renamed to {plugins}/discovery-gce.html[Discovery GCE plugin]. - - -==== Mapper Attachments plugin deprecated - -Mapper attachments has been deprecated. Users should use now the {plugins}/ingest-attachment.html[`ingest-attachment`] -plugin. - - -[[breaking_50_java_api_changes]] -=== Java API changes - -==== Count api has been removed - -The deprecated count api has been removed from the Java api, use the search api instead and set size to 0. - -The following call - -[source,java] ------ -client.prepareCount(indices).setQuery(query).get(); ------ - -can be replaced with - -[source,java] ------ -client.prepareSearch(indices).setSource(new SearchSourceBuilder().size(0).query(query)).get(); ------ - -==== BoostingQueryBuilder - -Removed setters for mandatory positive/negative query. Both arguments now have -to be supplied at construction time already and have to be non-null. - -==== SpanContainingQueryBuilder - -Removed setters for mandatory big/little inner span queries. Both arguments now have -to be supplied at construction time already and have to be non-null. Updated -static factory methods in QueryBuilders accordingly. - -==== SpanOrQueryBuilder - -Making sure that query contains at least one clause by making initial clause mandatory -in constructor. - -==== SpanNearQueryBuilder - -Removed setter for mandatory slop parameter, needs to be set in constructor now. Also -making sure that query contains at least one clause by making initial clause mandatory -in constructor. Updated the static factory methods in QueryBuilders accordingly. - -==== SpanNotQueryBuilder - -Removed setter for mandatory include/exclude span query clause, needs to be set in constructor now. -Updated the static factory methods in QueryBuilders and tests accordingly. - -==== SpanWithinQueryBuilder - -Removed setters for mandatory big/little inner span queries. Both arguments now have -to be supplied at construction time already and have to be non-null. Updated -static factory methods in QueryBuilders accordingly. 
- -==== QueryFilterBuilder - -Removed the setter `queryName(String queryName)` since this field is not supported -in this type of query. Use `FQueryFilterBuilder.queryName(String queryName)` instead -when in need to wrap a named query as a filter. - -==== WrapperQueryBuilder - -Removed `wrapperQueryBuilder(byte[] source, int offset, int length)`. Instead simply -use `wrapperQueryBuilder(byte[] source)`. Updated the static factory methods in -QueryBuilders accordingly. - -==== QueryStringQueryBuilder - -Removed ability to pass in boost value using `field(String field)` method in form e.g. `field^2`. -Use the `field(String, float)` method instead. - -==== Operator - -Removed the enums called `Operator` from `MatchQueryBuilder`, `QueryStringQueryBuilder`, -`SimpleQueryStringBuilder`, and `CommonTermsQueryBuilder` in favour of using the enum -defined in `org.elasticsearch.index.query.Operator` in an effort to consolidate the -codebase and avoid duplication. - -==== queryName and boost support - -Support for `queryName` and `boost` has been streamlined to all of the queries. That is -a breaking change till queries get sent over the network as serialized json rather -than in `Streamable` format. In fact whenever additional fields are added to the json -representation of the query, older nodes might throw error when they find unknown fields. - -==== InnerHitsBuilder - -InnerHitsBuilder now has a dedicated addParentChildInnerHits and addNestedInnerHits methods -to differentiate between inner hits for nested vs. parent / child documents. This change -makes the type / path parameter mandatory. - -==== MatchQueryBuilder - -Moving MatchQueryBuilder.Type and MatchQueryBuilder.ZeroTermsQuery enum to MatchQuery.Type. -Also reusing new Operator enum. - -==== MoreLikeThisQueryBuilder - -Removed `MoreLikeThisQueryBuilder.Item#id(String id)`, `Item#doc(BytesReference doc)`, -`Item#doc(XContentBuilder doc)`. Use provided constructors instead. - -Removed `MoreLikeThisQueryBuilder#addLike` in favor of texts and/or items being provided -at construction time. Using arrays there instead of lists now. - -Removed `MoreLikeThisQueryBuilder#addUnlike` in favor to using the `unlike` methods -which take arrays as arguments now rather than the lists used before. - -The deprecated `docs(Item... docs)`, `ignoreLike(Item... docs)`, -`ignoreLike(String... likeText)`, `addItem(Item... likeItems)` have been removed. - -==== GeoDistanceQueryBuilder - -Removing individual setters for lon() and lat() values, both values should be set together - using point(lon, lat). - -==== GeoDistanceRangeQueryBuilder - -Removing setters for to(Object ...) and from(Object ...) in favour of the only two allowed input -arguments (String, Number). Removing setter for center point (point(), geohash()) because parameter -is mandatory and should already be set in constructor. -Also removing setters for lt(), lte(), gt(), gte() since they can all be replaced by equivalent -calls to to/from() and inludeLower()/includeUpper(). - -==== GeoPolygonQueryBuilder - -Require shell of polygon already to be specified in constructor instead of adding it pointwise. -This enables validation, but makes it necessary to remove the addPoint() methods. - -==== MultiMatchQueryBuilder - -Moving MultiMatchQueryBuilder.ZeroTermsQuery enum to MatchQuery.ZeroTermsQuery. -Also reusing new Operator enum. - -Removed ability to pass in boost value using `field(String field)` method in form e.g. `field^2`. -Use the `field(String, float)` method instead. 
- -==== MissingQueryBuilder - -The MissingQueryBuilder which was deprecated in 2.2.0 is removed. As a replacement use ExistsQueryBuilder -inside a mustNot() clause. So instead of using `new ExistsQueryBuilder(name)` now use -`new BoolQueryBuilder().mustNot(new ExistsQueryBuilder(name))`. - -==== NotQueryBuilder - -The NotQueryBuilder which was deprecated in 2.1.0 is removed. As a replacement use BoolQueryBuilder -with added mustNot() clause. So instead of using `new NotQueryBuilder(filter)` now use -`new BoolQueryBuilder().mustNot(filter)`. - -==== TermsQueryBuilder - -Remove the setter for `termsLookup()`, making it only possible to either use a TermsLookup object or -individual values at construction time. Also moving individual settings for the TermsLookup (lookupIndex, -lookupType, lookupId, lookupPath) to the separate TermsLookup class, using constructor only and moving -checks for validation there. Removed `TermsLookupQueryBuilder` in favour of `TermsQueryBuilder`. - -==== FunctionScoreQueryBuilder - -`add` methods have been removed, all filters and functions must be provided as constructor arguments by -creating an array of `FunctionScoreQueryBuilder.FilterFunctionBuilder` objects, containing one element -for each filter/function pair. - -`scoreMode` and `boostMode` can only be provided using corresponding enum members instead -of string values: see `FilterFunctionScoreQuery.ScoreMode` and `CombineFunction`. - -`CombineFunction.MULT` has been renamed to `MULTIPLY`. - -==== IdsQueryBuilder - -For simplicity, only one way of adding the ids to the existing list (empty by default) is left: `addIds(String...)` - -==== DocumentAlreadyExistsException removed - -`DocumentAlreadyExistsException` is removed and a `VersionConflictException` is thrown instead (with a better -error description). This will influence code that use the `IndexRequest.opType()` or `IndexRequest.create()` -to index a document only if it doesn't already exist. - -==== ShapeBuilders - -`InternalLineStringBuilder` is removed in favour of `LineStringBuilder`, `InternalPolygonBuilder` in favour of PolygonBuilder` and `Ring` has been replaced with `LineStringBuilder`. Also the abstract base classes `BaseLineStringBuilder` and `BasePolygonBuilder` haven been merged with their corresponding implementations. - -==== RescoreBuilder - -`RecoreBuilder.Rescorer` was merged with `RescoreBuilder`, which now is an abstract superclass. QueryRescoreBuilder currently is its only implementation. - -==== PhraseSuggestionBuilder - -The inner DirectCandidateGenerator class has been moved out to its own class called DirectCandidateGeneratorBuilder. - -==== Elasticsearch will no longer detect logging implementations - -Elasticsearch now logs only to log4j 1.2. Previously if log4j wasn't on the classpath it made some effort to degrade to -slf4j or java.util.logging. Now it'll fail to work without the log4j 1.2 api. The log4j-over-slf4j bridge ought to work -when using the java client. As should log4j 2's log4j-1.2-api. The Elasticsearch server now only supports log4j as -configured by logging.yml and it no longer makes any effort to work if log4j isn't present. - -[[breaking_50_cache_concurrency]] -=== Cache concurrency level settings removed - -Two cache concurrency level settings `indices.requests.cache.concurrency_level` and -`indices.fielddata.cache.concurrency_level` because they no longer apply to the cache implementation used for the -request cache and the field data cache. 
- -[[breaking_50_non_loopback]] -=== Remove bind option of `non_loopback` - -This setting would arbitrarily pick the first interface not marked as loopback. Instead, specify by address -scope (e.g. `_local_,_site_` for all loopback and private network addresses) or by explicit interface names, -hostnames, or addresses. - -[[breaking_50_thread_pool]] -=== Forbid changing of thread pool types - -Previously, <> could be dynamically adjusted. The thread pool type effectively -controls the backing queue for the thread pool and modifying this is an expert setting with minimal practical benefits -and high risk of being misused. The ability to change the thread pool type for any thread pool has been removed; do note -that it is still possible to adjust relevant thread pool parameters for each of the thread pools (e.g., depending on -the thread pool type, `keep_alive`, `queue_size`, etc.). - -[[breaking_50_cpu_stats]] -=== System CPU stats - -The recent CPU usage (as a percent) has been added to the OS stats -reported under the node stats API and the cat nodes API. The breaking -change here is that there is a new object in the `os` object in the node -stats response. This object is called `cpu` and includes "percent" and -`load_average` as fields. This moves the `load_average` field that was -previously a top-level field in the `os` object to the `cpu` object. The -format of the `load_average` field has changed to an object with fields -`1m`, `5m`, and `15m` representing the one-minute, five-minute and -fifteen-minute loads respectively. If any of these fields are not present, -it indicates that the corresponding value is not available. - -In the cat nodes API response, the `cpu` field is output by default. The -previous `load` field has been removed and is replaced by `load_1m`, -`load_5m`, and `load_15m` which represent the one-minute, five-minute -and fifteen-minute loads respectively. The field will be null if the -corresponding value is not available. - -Finally, the API for `org.elasticsearch.monitor.os.OsStats` has -changed. The `getLoadAverage` method has been removed. The value for -this can now be obtained from `OsStats.Cpu#getLoadAverage` but it is no -longer a double and is instead an object encapsulating the one-minute, -five-minute and fifteen-minute load averages. Additionally, the recent -CPU usage can be obtained from `OsStats.Cpu#getPercent`. - -=== Fields option -Only stored fields are retrievable with this option. -The fields option won't be able to load non stored fields from _source anymore. - -[[breaking_50_allocation]] -=== Primary shard allocation - -Previously, primary shards were only assigned if a quorum of shard copies were found (configurable using -`index.recovery.initial_shards`, now deprecated). In case where a primary had only a single replica, quorum was defined -to be a single shard. This meant that any shard copy of an index with replication factor 1 could become primary, even it -was a stale copy of the data on disk. This is now fixed by using allocation IDs. - -Allocation IDs assign unique identifiers to shard copies. This allows the cluster to differentiate between multiple -copies of the same data and track which shards have been active, so that after a cluster restart, shard copies -containing only the most recent data can become primaries. - -=== Indices Shard Stores command - -By using allocation IDs instead of version numbers to identify shard copies for primary shard allocation, the former versioning scheme -has become obsolete. 
This is reflected in the indices-shards-stores.html[Indices Shard Stores API]. A new field `allocation_id` replaces the -former `version` field in the result of the Indices Shard Stores command. This field is available for all shard copies that have been either -created with the current version of Elasticsearch or have been active in a cluster running a current version of Elasticsearch. For legacy -shard copies that have not been active in a current version of Elasticsearch, a `legacy_version` field is available instead (equivalent to -the former `version` field). - -=== Reroute commands - -The reroute command `allocate` has been split into two distinct commands `allocate_replica` and `allocate_empty_primary`. -This was done as we introduced a new `allocate_stale_primary` command. The new `allocate_replica` command corresponds to the -old `allocate` command with `allow_primary` set to false. The new `allocate_empty_primary` command corresponds to the old -`allocate` command with `allow_primary` set to true. - -==== `index.shared_filesystem.recover_on_any_node` changes - -The behavior of `index.shared_filesystem.recover_on_any_node = true` has been changed. Previously, in the case where no -shard copies could be found, an arbitrary node was chosen by potentially ignoring allocation deciders. Now, we take -balancing into account but don't assign the shard if the allocation deciders are not satisfied. The behavior has also changed -in the case where shard copies can be found. Previously, a node not holding the shard copy was chosen if none of the nodes -holding shard copies were satisfying the allocation deciders. Now, the shard will be assigned to a node having a shard copy, -even if none of the nodes holding a shard copy satisfy the allocation deciders. - -[[breaking_50_percolator]] -=== Percolator - -Adding percolator queries and modifications to existing percolator queries are no longer visible in immediately -to the percolator. A refresh is required to run before the changes are visible to the percolator. - -The reason that this has changed is that on newly created indices the percolator automatically indexes the query terms -and these query terms are used at percolate time to reduce the amount of queries the percolate API needs evaluate. -This optimization didn't work in the percolate API mode where modifications to queries are immediately visible. - -The percolator by defaults sets the `size` option to `10` whereas before this was set to unlimited. - -The percolate api can no longer accept documents that have fields that don't exist in the mapping. - -When percolating an existing document then specifying a document in the source of the percolate request is not allowed -any more. - -The percolate api no longer modifies the mappings. Before the percolate api could be used to dynamically introduce new -fields to the mappings based on the fields in the document being percolated. This no longer works, because these -unmapped fields are not persisted in the mapping. - -Percolator documents are no longer excluded from the search response. - -[[breaking_50_packaging]] -=== Packaging - -==== Default logging using systemd (since Elasticsearch 2.2.0) - -In previous versions of Elasticsearch, the default logging -configuration routed standard output to /dev/null and standard error to -the journal. However, there are often critical error messages at -startup that are logged to standard output rather than standard error -and these error messages would be lost to the nether. 
The default has -changed to now route standard output to the journal and standard error -to inherit this setting (these are the defaults for systemd). These -settings can be modified by editing the elasticsearch.service file. - -[[breaking_50_scripting]] -=== Scripting - -==== Script mode settings - -Previously script mode settings (e.g., "script.inline: true", -"script.engine.groovy.inline.aggs: false", etc.) accepted the values -`on`, `true`, `1`, and `yes` for enabling a scripting mode, and the -values `off`, `false`, `0`, and `no` for disabling a scripting mode. -The variants `on`, `1`, and `yes ` for enabling and `off`, `0`, -and `no` for disabling are no longer supported. - -==== Groovy dependencies - -In previous versions of Elasticsearch, the Groovy scripting capabilities -depended on the `org.codehaus.groovy:groovy-all` artifact. In addition -to pulling in the Groovy language, this pulls in a very large set of -functionality, none of which is needed for scripting within -Elasticsearch. Aside from the inherent difficulties in managing such a -large set of dependencies, this also increases the surface area for -security issues. This dependency has been reduced to the core Groovy -language `org.codehaus.groovy:groovy` artifact. - -[[breaking_50_term_vectors]] -=== Term vectors - -The term vectors APIs no longer persist unmapped fields in the mappings. - -The `dfs` parameter has been removed completely, term vectors don't support -distributed document frequencies anymore. - -[[breaking_50_security]] -=== Security - -The option to disable the security manager `--security.manager.enabled` has been removed. In order to grant special -permissions to elasticsearch users must tweak the local Java Security Policy. diff --git a/docs/reference/migration/migrate_5_0/allocation.asciidoc b/docs/reference/migration/migrate_5_0/allocation.asciidoc new file mode 100644 index 00000000000..1e095831381 --- /dev/null +++ b/docs/reference/migration/migrate_5_0/allocation.asciidoc @@ -0,0 +1,54 @@ +[[breaking_50_allocation]] +=== Allocation changes + +==== Primary shard allocation + +Previously, primary shards were only assigned if a quorum of shard copies were +found (configurable using `index.recovery.initial_shards`, now deprecated). In +the case where a primary had only a single replica, quorum was defined to be a +single shard. This meant that any shard copy of an index with replication +factor 1 could become primary, even if it was a stale copy of the data on disk. +This is now fixed thanks to shard allocation IDs. + +Allocation IDs assign unique identifiers to shard copies. This allows the +cluster to differentiate between multiple copies of the same data and track +which shards have been active so that, after a cluster restart, only shard +copies containing the most recent data can become primaries. + +==== Indices Shard Stores command + +By using allocation IDs instead of version numbers to identify shard copies +for primary shard allocation, the former versioning scheme has become +obsolete. This is reflected in the +<>. + +A new `allocation_id` field replaces the former `version` field in the result +of the Indices Shard Stores command. This field is available for all shard +copies that have been either created with the current version of Elasticsearch +or have been active in a cluster running a current version of Elasticsearch.
+For legacy shard copies that have not been active in a current version of +Elasticsearch, a `legacy_version` field is available instead (equivalent to +the former `version` field). + +==== Reroute commands + +The reroute command `allocate` has been split into two distinct commands +`allocate_replica` and `allocate_empty_primary`. This was done as we +introduced a new `allocate_stale_primary` command. The new `allocate_replica` +command corresponds to the old `allocate` command with `allow_primary` set to +false. The new `allocate_empty_primary` command corresponds to the old +`allocate` command with `allow_primary` set to true. + +==== `index.shared_filesystem.recover_on_any_node` changes + +The behavior of `index.shared_filesystem.recover_on_any_node: true` has been +changed. Previously, in the case where no shard copies could be found, an +arbitrary node was chosen by potentially ignoring allocation deciders. Now, we +take balancing into account but don't assign the shard if the allocation +deciders are not satisfied. + +The behavior has also changed in the case where shard copies can be found. +Previously, a node not holding the shard copy was chosen if none of the nodes +holding shard copies were satisfying the allocation deciders. Now, the shard +will be assigned to a node having a shard copy, even if none of the nodes +holding a shard copy satisfy the allocation deciders. diff --git a/docs/reference/migration/migrate_5_0/cat.asciidoc b/docs/reference/migration/migrate_5_0/cat.asciidoc new file mode 100644 index 00000000000..c3b1c84ee8d --- /dev/null +++ b/docs/reference/migration/migrate_5_0/cat.asciidoc @@ -0,0 +1,33 @@ +[[breaking_50_cat_api]] +=== CAT API changes + +==== Use Accept header for specifying response media type + +Previous versions of Elasticsearch accepted the Content-type header +field for controlling the media type of the response in the cat API. +This is in opposition to the HTTP spec which specifies the Accept +header field for this purpose. Elasticsearch now uses the Accept header +field and support for using the Content-Type header field for this +purpose has been removed. + +==== Host field removed from the cat nodes API + +The `host` field has been removed from the cat nodes API as its value +is always equal to the `ip` field. The `name` field is available in the +cat nodes API and should be used instead of the `host` field. + +==== Changes to cat recovery API + +The fields `bytes_recovered` and `files_recovered` have been added to +the cat recovery API. These fields, respectively, indicate the total +number of bytes and files that have been recovered. + +The fields `total_files` and `total_bytes` have been renamed to +`files_total` and `bytes_total`, respectively. + +Additionally, the field `translog` has been renamed to +`translog_ops_recovered`, the field `translog_total` to +`translog_ops` and the field `translog_percent` to +`translog_ops_percent`. The short aliases for these fields are `tor`, +`to`, and `top`, respectively. + diff --git a/docs/reference/migration/migrate_5_0/index-apis.asciidoc b/docs/reference/migration/migrate_5_0/index-apis.asciidoc new file mode 100644 index 00000000000..72651295bbc --- /dev/null +++ b/docs/reference/migration/migrate_5_0/index-apis.asciidoc @@ -0,0 +1,48 @@ +[[breaking_50_index_apis]] +=== Index APIs changes + +==== Closing / deleting indices while running snapshot + +In previous versions of Elasticsearch, closing or deleting an index during a +full snapshot would make the snapshot fail. 
In 5.0, the close/delete index +request will fail instead. The behavior for partial snapshots remains +unchanged: Closing or deleting an index during a partial snapshot is still +possible. The snapshot result is then marked as partial. + +==== Warmers + +Thanks to several changes like doc values by default and disk-based norms, +warmers are no longer useful. As a consequence, warmers and the warmer API +have been removed: it is no longer possible to register queries that will run +before a new IndexSearcher is published. + +Don't worry if you have warmers defined on your indices, they will simply be +ignored when upgrading to 5.0. + +==== System CPU stats + +The recent CPU usage (as a percent) has been added to the OS stats +reported under the node stats API and the cat nodes API. The breaking +change here is that there is a new object in the `os` object in the node +stats response. This object is called `cpu` and includes `percent` and +`load_average` as fields. This moves the `load_average` field that was +previously a top-level field in the `os` object to the `cpu` object. The +format of the `load_average` field has changed to an object with fields +`1m`, `5m`, and `15m` representing the one-minute, five-minute and +fifteen-minute loads respectively. If any of these fields are not present, +it indicates that the corresponding value is not available. + +In the cat nodes API response, the `cpu` field is output by default. The +previous `load` field has been removed and is replaced by `load_1m`, +`load_5m`, and `load_15m` which represent the one-minute, five-minute +and fifteen-minute loads respectively. The field will be null if the +corresponding value is not available. + +Finally, the API for `org.elasticsearch.monitor.os.OsStats` has +changed. The `getLoadAverage` method has been removed. The value for +this can now be obtained from `OsStats.Cpu#getLoadAverage` but it is no +longer a double and is instead an object encapsulating the one-minute, +five-minute and fifteen-minute load averages. Additionally, the recent +CPU usage can be obtained from `OsStats.Cpu#getPercent`. + + diff --git a/docs/reference/migration/migrate_5_0/java.asciidoc b/docs/reference/migration/migrate_5_0/java.asciidoc new file mode 100644 index 00000000000..dc60ab58391 --- /dev/null +++ b/docs/reference/migration/migrate_5_0/java.asciidoc @@ -0,0 +1,227 @@ + + + +[[breaking_50_java_api_changes]] +=== Java API changes + +==== Count api has been removed + +The deprecated count api has been removed from the Java api; use the search api instead and set size to 0. + +The following call + +[source,java] +----- +client.prepareCount(indices).setQuery(query).get(); +----- + +can be replaced with + +[source,java] +----- +client.prepareSearch(indices).setSource(new SearchSourceBuilder().size(0).query(query)).get(); +----- + +==== Elasticsearch will no longer detect logging implementations + +Elasticsearch now logs only to log4j 1.2. Previously if log4j wasn't on the +classpath it made some effort to degrade to slf4j or java.util.logging. Now it +will fail to work without the log4j 1.2 api. The log4j-over-slf4j bridge ought +to work when using the java client, as should log4j 2's log4j-1.2-api. The +Elasticsearch server now only supports log4j as configured by `logging.yml` +and will fail if log4j isn't present. + +==== Groovy dependencies + +In previous versions of Elasticsearch, the Groovy scripting capabilities +depended on the `org.codehaus.groovy:groovy-all` artifact.
In addition +to pulling in the Groovy language, this pulls in a very large set of +functionality, none of which is needed for scripting within +Elasticsearch. Aside from the inherent difficulties in managing such a +large set of dependencies, this also increases the surface area for +security issues. This dependency has been reduced to the core Groovy +language `org.codehaus.groovy:groovy` artifact. + +==== DocumentAlreadyExistsException removed + +`DocumentAlreadyExistsException` is removed and a `VersionConflictException` is thrown instead (with a better +error description). This will influence code that uses `IndexRequest.opType()` or `IndexRequest.create()` +to index a document only if it doesn't already exist. + +==== Changes to Query Builders + +===== BoostingQueryBuilder + +Removed setters for mandatory positive/negative query. Both arguments now have +to be supplied at construction time already and have to be non-null. + +===== SpanContainingQueryBuilder + +Removed setters for mandatory big/little inner span queries. Both arguments now have +to be supplied at construction time already and have to be non-null. Updated +static factory methods in QueryBuilders accordingly. + +===== SpanOrQueryBuilder + +Making sure that query contains at least one clause by making initial clause mandatory +in constructor. + +===== SpanNearQueryBuilder + +Removed setter for mandatory slop parameter, needs to be set in constructor now. Also +making sure that query contains at least one clause by making initial clause mandatory +in constructor. Updated the static factory methods in QueryBuilders accordingly. + +===== SpanNotQueryBuilder + +Removed setter for mandatory include/exclude span query clause, needs to be set in constructor now. +Updated the static factory methods in QueryBuilders and tests accordingly. + +===== SpanWithinQueryBuilder + +Removed setters for mandatory big/little inner span queries. Both arguments now have +to be supplied at construction time already and have to be non-null. Updated +static factory methods in QueryBuilders accordingly. + +===== QueryFilterBuilder + +Removed the setter `queryName(String queryName)` since this field is not supported +in this type of query. Use `FQueryFilterBuilder.queryName(String queryName)` instead +when you need to wrap a named query as a filter. + +===== WrapperQueryBuilder + +Removed `wrapperQueryBuilder(byte[] source, int offset, int length)`. Instead simply +use `wrapperQueryBuilder(byte[] source)`. Updated the static factory methods in +QueryBuilders accordingly. + +===== QueryStringQueryBuilder + +Removed the ability to pass in a boost value using the `field(String field)` method in the form `field^2`. +Use the `field(String, float)` method instead. + +===== Operator + +Removed the enums called `Operator` from `MatchQueryBuilder`, `QueryStringQueryBuilder`, +`SimpleQueryStringBuilder`, and `CommonTermsQueryBuilder` in favour of using the enum +defined in `org.elasticsearch.index.query.Operator` in an effort to consolidate the +codebase and avoid duplication. + +===== queryName and boost support + +Support for `queryName` and `boost` has been streamlined to all of the queries. That is +a breaking change until queries are sent over the network as serialized json rather +than in `Streamable` format. In fact, whenever additional fields are added to the json +representation of the query, older nodes might throw an error when they find unknown fields.
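+ +For example, both options can now be chained on any concrete query builder. The following is a minimal, illustrative sketch (the field name and values are made up): + +[source,java] +----- +QueryBuilders.termQuery("user", "kimchy").boost(2.0f).queryName("user_term"); +-----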
+ +===== InnerHitsBuilder + +InnerHitsBuilder now has dedicated addParentChildInnerHits and addNestedInnerHits methods +to differentiate between inner hits for nested vs. parent / child documents. This change +makes the type / path parameter mandatory. + +===== MatchQueryBuilder + +Moving MatchQueryBuilder.Type and MatchQueryBuilder.ZeroTermsQuery enum to MatchQuery.Type. +Also reusing new Operator enum. + +===== MoreLikeThisQueryBuilder + +Removed `MoreLikeThisQueryBuilder.Item#id(String id)`, `Item#doc(BytesReference doc)`, +`Item#doc(XContentBuilder doc)`. Use provided constructors instead. + +Removed `MoreLikeThisQueryBuilder#addLike` in favor of texts and/or items being provided +at construction time. Using arrays there instead of lists now. + +Removed `MoreLikeThisQueryBuilder#addUnlike` in favor of using the `unlike` methods +which take arrays as arguments now rather than the lists used before. + +The deprecated `docs(Item... docs)`, `ignoreLike(Item... docs)`, +`ignoreLike(String... likeText)`, `addItem(Item... likeItems)` have been removed. + +===== GeoDistanceQueryBuilder + +Removing individual setters for lon() and lat() values; both values should be set together + using point(lon, lat). + +===== GeoDistanceRangeQueryBuilder + +Removing setters for to(Object ...) and from(Object ...) in favour of the only two allowed input +arguments (String, Number). Removing setter for center point (point(), geohash()) because parameter +is mandatory and should already be set in constructor. +Also removing setters for lt(), lte(), gt(), gte() since they can all be replaced by equivalent +calls to to/from() and includeLower()/includeUpper(). + +===== GeoPolygonQueryBuilder + +Requires the shell of the polygon to be specified in the constructor instead of adding it pointwise. +This enables validation, but makes it necessary to remove the addPoint() methods. + +===== MultiMatchQueryBuilder + +Moving MultiMatchQueryBuilder.ZeroTermsQuery enum to MatchQuery.ZeroTermsQuery. +Also reusing new Operator enum. + +Removed the ability to pass in a boost value using the `field(String field)` method in the form `field^2`. +Use the `field(String, float)` method instead.
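+ +For example, a per-field boost that was previously expressed as `field("subject^3")` would now be written as follows (a minimal sketch; the text and field names are made up): + +[source,java] +----- +QueryBuilders.multiMatchQuery("quick fox", "message").field("subject", 3.0f); +-----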
+ +`scoreMode` and `boostMode` can only be provided using corresponding enum members instead +of string values: see `FilterFunctionScoreQuery.ScoreMode` and `CombineFunction`. + +`CombineFunction.MULT` has been renamed to `MULTIPLY`. + +===== IdsQueryBuilder + +For simplicity, only one way of adding the ids to the existing list (empty by default) is left: `addIds(String...)` + +===== ShapeBuilders + +`InternalLineStringBuilder` is removed in favour of `LineStringBuilder`, `InternalPolygonBuilder` in favour of PolygonBuilder` and `Ring` has been replaced with `LineStringBuilder`. Also the abstract base classes `BaseLineStringBuilder` and `BasePolygonBuilder` haven been merged with their corresponding implementations. + +===== RescoreBuilder + +`RecoreBuilder.Rescorer` was merged with `RescoreBuilder`, which now is an abstract superclass. QueryRescoreBuilder currently is its only implementation. + +===== PhraseSuggestionBuilder + +The inner DirectCandidateGenerator class has been moved out to its own class called DirectCandidateGeneratorBuilder. + +===== SortBuilders + +The `sortMode` setter in `FieldSortBuilder`, `GeoDistanceSortBuilder` and `ScriptSortBuilder` now +accept a `SortMode` enum instead of a String constant. Also the getter returns the same enum type. + +===== SuggestBuilder + +The `setText` method has been changed to `setGlobalText` to make the intent more clear, and a `getGlobalText` method has been added. + +The `addSuggestion` method now required the user specified suggestion name, previously used in the ctor of each suggestion. + +===== SuggestionBuilder + +The `field` setter has been deleted. Instead the field name needs to be specified as constructor argument. diff --git a/docs/reference/migration/migrate_5_0/mapping.asciidoc b/docs/reference/migration/migrate_5_0/mapping.asciidoc new file mode 100644 index 00000000000..768a2438d3e --- /dev/null +++ b/docs/reference/migration/migrate_5_0/mapping.asciidoc @@ -0,0 +1,82 @@ +[[breaking_50_mapping_changes]] +=== Mapping changes + +==== `string` fields replaced by `text`/`keyword` fields + +The `string` field datatype has been replaced by the `text` field for full +text analyzed content, and the `keyword` field for not-analyzed exact string +values. For backwards compatibility purposes, during the 5.x series: + +* `string` fields on pre-5.0 indices will function as before. +* New `string` fields can be added to pre-5.0 indices as before. +* `text` and `keyword` fields can also be added to pre-5.0 indices. +* When adding a `string` field to a new index, the field mapping will be + rewritten as a `text` or `keyword` field if possible, otherwise + an exception will be thrown. Certain configurations that were possible + with `string` fields are no longer possible with `text`/`keyword` fields + such as enabling `term_vectors` on a not-analyzed `keyword` field. + +==== `index` property + +On all field datatypes (except for the deprecated `string` field), the `index` +property now only accepts `true`/`false` instead of `not_analyzed`/`no`. The +`string` field still accepts `analyzed`/`not_analyzed`/`no`. + +==== Doc values on unindexed fields + +Previously, setting a field to `index:no` would also disable doc-values. Now, +doc-values are always enabled on numeric and boolean fields unless +`doc_values` is set to `false`. + +==== Floating points use `float` instead of `double` + +When dynamically mapping a field containing a floating point number, the field +now defaults to using `float` instead of `double`. 
The reasoning is that +floats should be more than enough for most cases but would decrease storage +requirements significantly. + +==== `fielddata.format` + +Setting `fielddata.format: doc_values` in the mappings used to implicitly +enable doc-values on a field. This no longer works: the only way to enable or +disable doc-values is by using the `doc_values` property of mappings. + +==== Source-transform removed + +The source `transform` feature has been removed. Instead, use an ingest pipeline. + +==== `_parent` field no longer indexed + +The join between parent and child documents no longer relies on indexed fields +and therefore from 5.0.0 onwards the `_parent` field is no longer indexed. In +order to find documents that refer to a specific parent id, the new +`parent_id` query can be used. The GET response and hits inside the search +response still include the parent id under the `_parent` key. + +==== Source `format` option + +The `_source` mapping no longer supports the `format` option. It will still be +accepted for indices created before the upgrade to 5.0 for backwards +compatibility, but it will have no effect. Indices created on or after 5.0 +will reject this option. + +==== Object notation + +Core types no longer support the object notation, which was used to provide +per document boosts as follows: + +[source,json] +--------------- +{ + "value": "field_value", + "boost": 42 +} +--------------- + +==== Boost accuracy for queries on `_all` + +Per-field boosts on the `_all` are now compressed into a single byte instead +of the 4 bytes used previously. While this will make the index much more +space-efficient, it also means that index time boosts will be less accurately +encoded. + diff --git a/docs/reference/migration/migrate_5_0/packaging.asciidoc b/docs/reference/migration/migrate_5_0/packaging.asciidoc new file mode 100644 index 00000000000..9be2d4accac --- /dev/null +++ b/docs/reference/migration/migrate_5_0/packaging.asciidoc @@ -0,0 +1,24 @@ +[[breaking_50_packaging]] +=== Packaging + +==== Default logging using systemd (since Elasticsearch 2.2.0) + +In previous versions of Elasticsearch, the default logging +configuration routed standard output to /dev/null and standard error to +the journal. However, there are often critical error messages at +startup that are logged to standard output rather than standard error +and these error messages would be lost to the nether. The default has +changed to now route standard output to the journal and standard error +to inherit this setting (these are the defaults for systemd). These +settings can be modified by editing the elasticsearch.service file. + +==== Longer startup times + +In Elasticsearch 5.0.0 the `-XX:+AlwaysPreTouch` flag has been added to the JVM +startup options. This option touches all memory pages used by the JVM heap +during initialization of the HotSpot VM to reduce the chance of having to commit +a memory page during GC time. This will increase the startup time of +Elasticsearch as well as increase the initial resident memory usage of the +Java process. + + diff --git a/docs/reference/migration/migrate_5_0/percolator.asciidoc b/docs/reference/migration/migrate_5_0/percolator.asciidoc new file mode 100644 index 00000000000..73f262afb70 --- /dev/null +++ b/docs/reference/migration/migrate_5_0/percolator.asciidoc @@ -0,0 +1,43 @@ +[[breaking_50_percolator]] +=== Percolator changes + +==== Percolator is near-real time + +Previously, percolators were activated in real-time, i.e. as soon as they were +indexed.
Now, changes to the percolator query are visible in near-real time, +as soon as the index has been refreshed. This change was required because, in +indices created from 5.0 onwards, the terms used in a percolator query are +automatically indexed to allow for more efficient query selection during +percolation. + +==== Percolate and multi percolator APIs + +The percolate and multi percolate APIs have been deprecated and will be removed in the next major release. These APIs have +been replaced by the `percolator` query that can be used in the search and multi search APIs. + +==== Percolator mapping + +The `percolator` query can no longer accept documents that reference fields +that don't already exist in the mapping. Previously, the percolate API allowed this. + +The `percolator` query no longer modifies the mappings. Previously, the percolate API +could be used to dynamically introduce new fields to the mappings based on the +fields in the document being percolated. This no longer works, because these +unmapped fields are not persisted in the mapping. + +==== Percolator documents returned by search + +Documents with the `.percolate` type were previously excluded from the search +response, unless the `.percolate` type was specified explicitly in the search +request. Now, percolator documents are treated in the same way as any other +document and are returned by search requests. + +==== Percolating existing document + +When percolating an existing document, also specifying a document as source in the +`percolator` query is no longer allowed. Previously, the percolate API allowed this and ignored +the existing document. + +==== Percolate Stats + +Percolate stats have been replaced with percolator query cache stats in nodes stats and cluster stats APIs. \ No newline at end of file diff --git a/docs/reference/migration/migrate_5_0/plugins.asciidoc b/docs/reference/migration/migrate_5_0/plugins.asciidoc new file mode 100644 index 00000000000..10268887417 --- /dev/null +++ b/docs/reference/migration/migrate_5_0/plugins.asciidoc @@ -0,0 +1,99 @@ +[[breaking_50_plugins]] +=== Plugin changes + +The command `bin/plugin` has been renamed to `bin/elasticsearch-plugin`. The +structure of the plugin ZIP archive has changed. All the plugin files must be +contained in a top-level directory called `elasticsearch`. If you use the +gradle build, this structure is automatically generated. + +==== Site plugins removed + +Site plugins have been removed. Site plugins should be reimplemented as Kibana +plugins. + +==== Multicast plugin removed + +Multicast has been removed. Use unicast discovery, or one of the cloud +discovery plugins. + +==== Plugins with custom query implementations + +Plugins implementing custom queries need to implement the `fromXContent(QueryParseContext)` method in their +`QueryParser` subclass rather than `parse`. This method will take care of parsing the query from `XContent` format +into an intermediate query representation that can be streamed between the nodes in binary format, effectively the +query object used in the java api. Also, the query parser needs to implement the `getBuilderPrototype` method that +returns a prototype of the `NamedWriteable` query, which makes it possible to deserialize an incoming query by calling +`readFrom(StreamInput)` against it, which will create a new object; see usages of `Writeable`. The `QueryParser` +also needs to declare the generic type of the query that it supports and is able to parse.
+
+The query object can then transform itself into a Lucene query through the new `toQuery(QueryShardContext)` method,
+which returns a Lucene query to be executed on the data node.
+
+Similarly, plugins implementing custom score functions need to implement the `fromXContent(QueryParseContext)`
+method in their `ScoreFunctionParser` subclass rather than `parse`. This method will take care of parsing
+the function from `XContent` format into an intermediate function representation that can be streamed between
+the nodes in binary format, effectively the function object used in the Java API. Also, the score function parser
+needs to implement the `getBuilderPrototype` method that returns a prototype of the `NamedWriteable` function, which
+makes it possible to deserialize an incoming function by calling `readFrom(StreamInput)` against it, creating a
+new object; see usages of `Writeable`. The `ScoreFunctionParser` also needs to declare the generic type of the
+function that it supports and is able to parse. The function object can then transform itself into a Lucene
+function through the new `toFunction(QueryShardContext)` method, which returns a Lucene function to be executed
+on the data node.
+
+==== Cloud AWS plugin changes
+
+The Cloud AWS plugin has been split into two plugins:
+
+* {plugins}/discovery-ec2.html[Discovery EC2 plugin]
+* {plugins}/repository-s3.html[Repository S3 plugin]
+
+Proxy settings for both plugins have been renamed:
+
+* from `cloud.aws.proxy_host` to `cloud.aws.proxy.host`
+* from `cloud.aws.ec2.proxy_host` to `cloud.aws.ec2.proxy.host`
+* from `cloud.aws.s3.proxy_host` to `cloud.aws.s3.proxy.host`
+* from `cloud.aws.proxy_port` to `cloud.aws.proxy.port`
+* from `cloud.aws.ec2.proxy_port` to `cloud.aws.ec2.proxy.port`
+* from `cloud.aws.s3.proxy_port` to `cloud.aws.s3.proxy.port`
+
+==== Cloud Azure plugin changes
+
+The Cloud Azure plugin has been split into three plugins:
+
+* {plugins}/discovery-azure.html[Discovery Azure plugin]
+* {plugins}/repository-azure.html[Repository Azure plugin]
+* {plugins}/store-smb.html[Store SMB plugin]
+
+If you were using the `cloud-azure` plugin for snapshot and restore, your
+`elasticsearch.yml` contained:
+
+[source,yaml]
+-----
+cloud:
+    azure:
+        storage:
+            account: your_azure_storage_account
+            key: your_azure_storage_key
+-----
+
+You now need to give the storage details a unique id, as multiple storage
+accounts can be defined:
+
+[source,yaml]
+-----
+cloud:
+    azure:
+        storage:
+            my_account:
+                account: your_azure_storage_account
+                key: your_azure_storage_key
+-----
+
+
+==== Cloud GCE plugin changes
+
+The Cloud GCE plugin has been renamed to {plugins}/discovery-gce.html[Discovery GCE plugin].
+
+
+==== Mapper Attachments plugin deprecated
+
+The mapper attachments plugin has been deprecated. Users should now use the
+{plugins}/ingest-attachment.html[`ingest-attachment`] plugin.
+
diff --git a/docs/reference/migration/migrate_5_0/rest.asciidoc b/docs/reference/migration/migrate_5_0/rest.asciidoc
new file mode 100644
index 00000000000..590a097f021
--- /dev/null
+++ b/docs/reference/migration/migrate_5_0/rest.asciidoc
@@ -0,0 +1,17 @@
+
+[[breaking_50_rest_api_changes]]
+=== REST API changes
+
+==== `_id` values longer than 512 bytes are rejected
+
+When specifying an `_id` value longer than 512 bytes, the request will be
+rejected.
+
+==== `/_optimize` endpoint removed
+
+The deprecated `/_optimize` endpoint has been removed. The `/_forcemerge`
+endpoint should be used in lieu of optimize.
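+
+For example, assuming an index named `my_index`, a force merge down to a
+single segment could be triggered as follows (the `max_num_segments`
+parameter is optional):
+
+[source,sh]
+---------------
+POST /my_index/_forcemerge?max_num_segments=1
+---------------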
+
+The `GET` HTTP verb for `/_forcemerge` is no longer supported; please use the
+`POST` HTTP verb.
+
diff --git a/docs/reference/migration/migrate_5_0/search.asciidoc b/docs/reference/migration/migrate_5_0/search.asciidoc
new file mode 100644
index 00000000000..fad75247a23
--- /dev/null
+++ b/docs/reference/migration/migrate_5_0/search.asciidoc
@@ -0,0 +1,141 @@
+[[breaking_50_search_changes]]
+=== Search and Query DSL changes
+
+==== `search_type`
+
+===== `search_type=count` removed
+
+The `count` search type was deprecated in version 2.0.0 and is now removed.
+In order to get the same benefits, you just need to set the value of the `size`
+parameter to `0`.
+
+For instance, the following request:
+
+[source,sh]
+---------------
+GET /my_index/_search?search_type=count
+{
+  "aggs": {
+    "my_terms": {
+      "terms": {
+        "field": "foo"
+      }
+    }
+  }
+}
+---------------
+
+can be replaced with:
+
+[source,sh]
+---------------
+GET /my_index/_search
+{
+  "size": 0,
+  "aggs": {
+    "my_terms": {
+      "terms": {
+        "field": "foo"
+      }
+    }
+  }
+}
+---------------
+
+===== `search_type=scan` removed
+
+The `scan` search type was deprecated in version 2.1.0 and is now removed.
+All benefits from this search type can now be achieved by doing a scroll
+request that sorts documents in `_doc` order, for instance:
+
+[source,sh]
+---------------
+GET /my_index/_search?scroll=2m
+{
+  "sort": [
+    "_doc"
+  ]
+}
+---------------
+
+Scroll requests sorted by `_doc` have been optimized to more efficiently resume
+from where the previous request stopped, so this will have the same performance
+characteristics as the former `scan` search type.
+
+==== `fields` parameter
+
+The `fields` parameter used to try to retrieve field values from stored
+fields, and fall back to extracting from the `_source` if a field is not
+marked as stored. Now, the `fields` parameter will only return stored fields
+-- it will no longer extract values from the `_source`.
+
+==== search-exists API removed
+
+The search exists API has been removed in favour of using the search API with
+`size` set to `0` and `terminate_after` set to `1`.
+
+
+==== Deprecated queries removed
+
+The following deprecated queries have been removed:
+
+`filtered`:: Use `bool` query instead, which supports `filter` clauses too.
+`and`:: Use `must` clauses in a `bool` query instead.
+`or`:: Use `should` clauses in a `bool` query instead.
+`limit`:: Use the `terminate_after` parameter instead.
+`fquery`:: Is obsolete after filters and queries have been merged.
+`query`:: Is obsolete after filters and queries have been merged.
+`query_binary`:: Was undocumented and has been removed.
+`filter_binary`:: Was undocumented and has been removed.
+
+
+==== Changes to queries
+
+* Removed support for the deprecated `min_similarity` parameter in the
+  `fuzzy` query, in favour of `fuzziness`.
+
+* Removed support for the deprecated `fuzzy_min_sim` parameter in
+  `query_string` query, in favour of `fuzziness`.
+
+* Removed support for the deprecated `edit_distance` parameter in completion
+  suggester, in favour of `fuzziness`.
+
+* Removed support for the deprecated `filter` and `no_match_filter` fields in `indices` query,
+in favour of `query` and `no_match_query`.
+
+* Removed support for the deprecated `filter` fields in `nested` query, in favour of `query`.
+
+* Removed support for the deprecated `minimum_should_match` and
+  `disable_coord` in `terms` query; use `bool` query instead. Also removed
+  support for the deprecated `execution` parameter.
+
+* Removed support for the top level `filter` element in `function_score` query, replaced by `query`.
+
+* The `collect_payloads` parameter of the `span_near` query has been deprecated. Payloads will be loaded when needed.
+
+* The `score_type` parameter to the `nested`, `has_child` and `has_parent` queries has been removed in favour of `score_mode`.
+  Also, the `total` score mode has been removed in favour of the `sum` mode.
+
+* When the `max_children` parameter was set to `0` on the `has_child` query
+  then there was no upper limit on how many child documents were allowed to
+  match. Now, `0` really means that zero child documents are allowed. If no
+  upper limit is needed then the `max_children` parameter shouldn't be specified
+  at all.
+
+
+==== Top level `filter` parameter
+
+Removed support for the deprecated top level `filter` in the search API,
+replaced by `post_filter`.
+
+==== Highlighters
+
+Removed support for multiple highlighter names; the only supported ones are
+`plain`, `fvh` and `postings`.
+
+==== Term vectors API
+
+The term vectors APIs no longer persist unmapped fields in the mappings.
+
+The `dfs` parameter to the term vectors API has been removed completely. Term
+vectors don't support distributed document frequencies anymore.
diff --git a/docs/reference/migration/migrate_5_0/settings.asciidoc b/docs/reference/migration/migrate_5_0/settings.asciidoc
new file mode 100644
index 00000000000..6dd15be0ed4
--- /dev/null
+++ b/docs/reference/migration/migrate_5_0/settings.asciidoc
@@ -0,0 +1,194 @@
+[[breaking_50_settings_changes]]
+=== Settings changes
+
+From Elasticsearch 5.0 onwards, all settings are validated before they are
+applied. Node level and default index level settings are validated on node
+startup; dynamic cluster and index settings are validated before they are
+updated/added to the cluster state.
+
+Every setting must be a *known* setting. All settings must have been
+registered with the node or transport client they are used with. This implies
+that plugins that define custom settings must register all of their settings
+during plugin loading using the `SettingsModule#registerSettings(Setting)`
+method.
+
+==== Node settings
+
+The `name` setting has been removed and is replaced by `node.name`. Usage of
+`-Dname=some_node_name` is not supported anymore.
+
+==== Gateway settings
+
+The `gateway.format` setting for configuring global and index state serialization
+format has been removed. By default, `smile` is used as the format.
+
+==== Transport Settings
+
+All settings with a `netty` infix have been replaced by their already existing
+`transport` synonyms. For instance, `transport.netty.bind_host` is no longer
+supported and should be replaced by the superseding setting
+`transport.bind_host`.
+
+==== Script mode settings
+
+Previously script mode settings (e.g., `script.inline: true`,
+`script.engine.groovy.inline.aggs: false`, etc.) accepted the values
+`on`, `true`, `1`, and `yes` for enabling a scripting mode, and the
+values `off`, `false`, `0`, and `no` for disabling a scripting mode.
+The variants `on`, `1`, and `yes` for enabling and `off`, `0`,
+and `no` for disabling are no longer supported.
+
+
+==== Security manager settings
+
+The option to disable the security manager, `security.manager.enabled`, has been
+removed. In order to grant special permissions to Elasticsearch, users must
+edit the local Java Security Policy.
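+
+For example, a grant added to the local `.java.policy` file might look like
+the following (the permission shown is purely illustrative):
+
+[source,txt]
+---------------
+grant {
+    permission java.lang.RuntimePermission "getClassLoader";
+};
+---------------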
+
+==== Network settings
+
+The `_non_loopback_` value for settings like `network.host` would arbitrarily
+pick the first interface not marked as loopback. Instead, specify by address
+scope (e.g. `_local_,_site_` for all loopback and private network addresses)
+or by explicit interface names, hostnames, or addresses.
+
+==== Forbid changing of thread pool types
+
+Previously, <> could be dynamically
+adjusted. The thread pool type effectively controls the backing queue for the
+thread pool and modifying this is an expert setting with minimal practical
+benefits and high risk of being misused. The ability to change the thread pool
+type for any thread pool has been removed. It is still possible to adjust
+relevant thread pool parameters for each of the thread pools (e.g., depending
+on the thread pool type, `keep_alive`, `queue_size`, etc.).
+
+
+==== Analysis settings
+
+The `index.analysis.analyzer.default_index` analyzer is not supported anymore.
+If you wish to change the analyzer to use for indexing, change the
+`index.analysis.analyzer.default` analyzer instead.
+
+==== Ping timeout settings
+
+Previously, there were three settings for the ping timeout:
+`discovery.zen.initial_ping_timeout`, `discovery.zen.ping.timeout` and
+`discovery.zen.ping_timeout`. The former two have been removed and the only
+setting key for the ping timeout is now `discovery.zen.ping_timeout`. The
+default value for ping timeouts remains at three seconds.
+
+==== Recovery settings
+
+Recovery settings deprecated in 1.x have been removed:
+
+ * `index.shard.recovery.translog_size` is superseded by `indices.recovery.translog_size`
+ * `index.shard.recovery.translog_ops` is superseded by `indices.recovery.translog_ops`
+ * `index.shard.recovery.file_chunk_size` is superseded by `indices.recovery.file_chunk_size`
+ * `index.shard.recovery.concurrent_streams` is superseded by `indices.recovery.concurrent_streams`
+ * `index.shard.recovery.concurrent_small_file_streams` is superseded by `indices.recovery.concurrent_small_file_streams`
+ * `indices.recovery.max_size_per_sec` is superseded by `indices.recovery.max_bytes_per_sec`
+
+If you are using any of these settings, please take the time to review their
+purpose. All of the settings above are considered _expert settings_ and should
+only be used if absolutely necessary. If you have set any of the above settings
+as persistent cluster settings, please use the settings update API and set
+their superseded keys accordingly.
+
+The following settings have been removed without replacement:
+
+ * `indices.recovery.concurrent_small_file_streams` - recoveries are now single threaded. The number of concurrent outgoing recoveries is throttled via allocation deciders
+ * `indices.recovery.concurrent_file_streams` - recoveries are now single threaded. The number of concurrent outgoing recoveries is throttled via allocation deciders
+
+==== Translog settings
+
+The `index.translog.flush_threshold_ops` setting is not supported anymore. In
+order to control flushes based on transaction log growth, use
+`index.translog.flush_threshold_size` instead.
+
+Changing the translog type with `index.translog.fs.type` is not supported
+anymore; the `buffered` implementation is now the only available option and
+uses a fixed `8kb` buffer.
+
+The translog by default is fsynced after every `index`, `create`, `update`,
+`delete`, or `bulk` request. The ability to fsync on every operation is not
+necessary anymore.
In fact, it can be a performance bottleneck and it is trappy
+since it is enabled by a special value set on `index.translog.sync_interval`.
+Now, `index.translog.sync_interval` doesn't accept a value less than `100ms`
+which prevents fsyncing too often if async durability is enabled. The special
+value `0` is no longer supported.
+
+==== Request Cache Settings
+
+The deprecated settings `index.cache.query.enable` and
+`indices.cache.query.size` have been removed and are replaced with
+`index.requests.cache.enable` and `indices.requests.cache.size` respectively.
+
+`indices.requests.cache.clean_interval` has been replaced with
+`indices.cache.clean_interval` and is no longer supported.
+
+==== Field Data Cache Settings
+
+The `indices.fielddata.cache.clean_interval` setting has been replaced with
+`indices.cache.clean_interval`.
+
+==== Allocation settings
+
+The `cluster.routing.allocation.concurrent_recoveries` setting has been
+replaced with `cluster.routing.allocation.node_concurrent_recoveries`.
+
+==== Similarity settings
+
+The `default` similarity has been renamed to `classic`.
+
+==== Indexing settings
+
+The `indices.memory.min_shard_index_buffer_size` and
+`indices.memory.max_shard_index_buffer_size` settings have been removed, as
+Elasticsearch now allows any one shard to use any amount of heap as long as the
+total indexing buffer heap used across all shards is below the node's
+`indices.memory.index_buffer_size` (defaults to 10% of the JVM heap).
+
+==== Removed `es.max-open-files`
+
+Support for setting the system property `es.max-open-files` to `true`, to get
+Elasticsearch to print the maximum number of open files for the
+Elasticsearch process, has been removed. This same information can be
+obtained from the <> API, and a warning is logged
+on startup if it is set too low.
+
+==== Removed `es.netty.gathering`
+
+Disabling Netty from using NIO gathering could be done via the escape
+hatch of setting the system property `es.netty.gathering` to `false`.
+Time has proven enabling gathering by default is a non-issue and this
+undocumented setting has been removed.
+
+==== Removed `es.useLinkedTransferQueue`
+
+The system property `es.useLinkedTransferQueue` could be used to
+control the queue implementation used in the cluster service and the
+handling of ping responses during discovery. This was an undocumented
+setting and has been removed.
+
+==== Cache concurrency level settings removed
+
+The two cache concurrency level settings
+`indices.requests.cache.concurrency_level` and
+`indices.fielddata.cache.concurrency_level` have been removed, because they no
+longer apply to the cache implementation used for the request cache and the
+field data cache.
+
+==== Using system properties to configure Elasticsearch
+
+Elasticsearch can be configured by setting system properties on the
+command line via `-Des.name.of.property=value.of.property`. This will be
+removed in a future version of Elasticsearch. Instead, use
+`-Ees.name.of.setting=value.of.setting`. Note that in all cases the
+name of the setting must be prefixed with `es.`.
+
+==== Removed using double-dashes to configure Elasticsearch
+
+Elasticsearch could previously be configured on the command line by
+setting settings via `--name.of.setting value.of.setting`. This feature
+has been removed. Instead, use
+`-Ees.name.of.setting=value.of.setting`. Note that in all cases the
+name of the setting must be prefixed with `es.`.
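+
+For example, to set the node name and the data path on the command line (the
+values shown are illustrative):
+
+[source,sh]
+---------------
+./bin/elasticsearch -Ees.node.name=node-1 -Ees.path.data=/var/elasticsearch/data
+---------------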
diff --git a/docs/reference/modules/cluster/allocation_awareness.asciidoc b/docs/reference/modules/cluster/allocation_awareness.asciidoc
index ee3cbc17f5f..5735b52a1a8 100644
--- a/docs/reference/modules/cluster/allocation_awareness.asciidoc
+++ b/docs/reference/modules/cluster/allocation_awareness.asciidoc
@@ -21,7 +21,7 @@ attribute called `rack_id` -- we could use any attribute name. For example:
 [source,sh]
 ----------------------
-./bin/elasticsearch --node.rack_id rack_one <1>
+./bin/elasticsearch -Ees.node.rack_id=rack_one <1>
 ----------------------
 <1> This setting could also be specified in the `elasticsearch.yml` config file.
diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc
index 9b940cd64a3..e8ce79375e7 100644
--- a/docs/reference/modules/node.asciidoc
+++ b/docs/reference/modules/node.asciidoc
@@ -29,8 +29,11 @@ perform data related operations such as CRUD, search, and aggregations.
 <>::
 
-A node that has `node.ingest` set to `true` (default). Ingest nodes can be
-used to pre-process documents before the actual indexing takes place.
+A node that has `node.ingest` set to `true` (default). Ingest nodes are able
+to apply an <> to a document in order to transform
+and enrich the document before indexing. With a heavy ingest load, it makes
+sense to use dedicated ingest nodes and to mark the master and data nodes as
+`node.ingest: false`.
 
 <>::
 
@@ -261,7 +264,7 @@ Like all node settings, it can also be specified on the command line as:
 
 [source,sh]
-----------------------
-./bin/elasticsearch --path.data /var/elasticsearch/data
+./bin/elasticsearch -Ees.path.data=/var/elasticsearch/data
-----------------------
 
 TIP: When using the `.zip` or `.tar.gz` distributions, the `path.data` setting
diff --git a/docs/reference/modules/scripting/security.asciidoc b/docs/reference/modules/scripting/security.asciidoc
index af193b35103..e84289bf1d9 100644
--- a/docs/reference/modules/scripting/security.asciidoc
+++ b/docs/reference/modules/scripting/security.asciidoc
@@ -100,7 +100,13 @@ Security Policy either:
 
 * system wide: `$JAVA_HOME/lib/security/java.policy`,
 * for just the `elasticsearch` user: `/home/elasticsearch/.java.policy`, or
-* from a file specified on the command line: `-Djava.security.policy=someURL`
+* from a file specified in the `JAVA_OPTS` environment variable with `-Djava.security.policy=someURL`:
++
+[source,sh]
+---------------------------------
+export JAVA_OPTS="${JAVA_OPTS} -Djava.security.policy=file:///path/to/my.policy"
+./bin/elasticsearch
+---------------------------------
 
 Permissions may be granted at the class, package, or global level. For instance:
diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc
index 39fdae80242..1e736efa11d 100644
--- a/docs/reference/query-dsl/function-score-query.asciidoc
+++ b/docs/reference/query-dsl/function-score-query.asciidoc
@@ -215,6 +215,22 @@ There are a number of options for the `field_value_factor` function:
     `log1p`, `log2p`, `ln`, `ln1p`, `ln2p`, `square`, `sqrt`, or `reciprocal`.
     Defaults to `none`.
+[cols="<,<",options="header",] +|======================================================================= +| Modifier | Meaning + +| `none` | Do not apply any multiplier to the field value +| `log` | Take the https://en.wikipedia.org/wiki/Logarithm[logarithm] of the field value +| `log1p` | Add 1 to the field value and take the logarithm +| `log2p` | Add 2 to the field value and take the logarithm +| `ln` | Take the https://en.wikipedia.org/wiki/Natural_logarithm[natural logarithm] of the field value +| `ln1p` | Add 1 to the field value and take the natural logarithm +| `ln2p` | Add 2 to the field value and take the natural logarithm +| `square` | Square the field value (multiply it by itself) +| `sqrt` | Take the https://en.wikipedia.org/wiki/Square_root[square root] of the field value +| `reciprocal` | https://en.wikipedia.org/wiki/Multiplicative_inverse[Reciprocate] the field value, same as `1/x` where `x` is the field's value +|======================================================================= + `missing`:: Value used if the document doesn't have that field. The modifier diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index c52bcb93e7d..b2103e5772f 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -52,9 +52,6 @@ Then the following simple query can be executed with a |Option |Description |`_name` |Optional name field to identify the filter -|`coerce` |Set to `true` to normalize longitude and latitude values to a -standard -180:180 / -90:90 coordinate system. (default is `false`). - |`ignore_malformed` |Set to `true` to accept geo points with invalid latitude or longitude (default is `false`). @@ -188,10 +185,10 @@ values separately. "filter" : { "geo_bounding_box" : { "pin.location" : { - "top" : -74.1, - "left" : 40.73, - "bottom" : -71.12, - "right" : 40.01 + "top" : 40.73, + "left" : -74.1, + "bottom" : 40.01, + "right" : -71.12 } } } diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc index c5b6029dc2f..7ea380bdad2 100644 --- a/docs/reference/query-dsl/geo-distance-query.asciidoc +++ b/docs/reference/query-dsl/geo-distance-query.asciidoc @@ -162,11 +162,6 @@ The following are options allowed on the filter: Optional name field to identify the query -`coerce`:: - - Set to `true` to normalize longitude and latitude values to a standard -180:180 / -90:90 - coordinate system. (default is `false`). - `ignore_malformed`:: Set to `true` to accept geo points with invalid latitude or diff --git a/docs/reference/query-dsl/geo-polygon-query.asciidoc b/docs/reference/query-dsl/geo-polygon-query.asciidoc index 306b2dd2d84..269aeed09ca 100644 --- a/docs/reference/query-dsl/geo-polygon-query.asciidoc +++ b/docs/reference/query-dsl/geo-polygon-query.asciidoc @@ -34,9 +34,6 @@ points. Here is an example: |Option |Description |`_name` |Optional name field to identify the filter -|`coerce` |Set to `true` to normalize longitude and latitude values to a -standard -180:180 / -90:90 coordinate system. (default is `false`). - |`ignore_malformed` |Set to `true` to accept geo points with invalid latitude or longitude (default is `false`). 
|======================================================================= diff --git a/docs/reference/query-dsl/has-child-query.asciidoc b/docs/reference/query-dsl/has-child-query.asciidoc index 24951bbe930..b98646148f8 100644 --- a/docs/reference/query-dsl/has-child-query.asciidoc +++ b/docs/reference/query-dsl/has-child-query.asciidoc @@ -23,7 +23,7 @@ an example: ==== Scoring capabilities The `has_child` also has scoring support. The -supported score modes are `min`, `max`, `total`, `avg` or `none`. The default is +supported score modes are `min`, `max`, `sum`, `avg` or `none`. The default is `none` and yields the same behaviour as in previous versions. If the score mode is set to another value than `none`, the scores of all the matching child documents are aggregated into the associated parent @@ -35,7 +35,7 @@ inside the `has_child` query: { "has_child" : { "type" : "blog_tag", - "score_mode" : "sum", + "score_mode" : "min", "query" : { "term" : { "tag" : "something" @@ -57,7 +57,7 @@ a match: { "has_child" : { "type" : "blog_tag", - "score_mode" : "sum", + "score_mode" : "min", "min_children": 2, <1> "max_children": 10, <1> "query" : { diff --git a/docs/reference/query-dsl/mlt-query.asciidoc b/docs/reference/query-dsl/mlt-query.asciidoc index ee4b695c2ff..ce2d34144ee 100644 --- a/docs/reference/query-dsl/mlt-query.asciidoc +++ b/docs/reference/query-dsl/mlt-query.asciidoc @@ -73,7 +73,6 @@ present in the index, the syntax is similar to <> or <> APIs. + +===================================== + +[float] +=== Sample Usage + +Create an index with a mapping for the field `message`: + +[source,js] +-------------------------------------------------- +curl -XPUT 'localhost:9200/my-index' -d '{ + "mappings": { + "my-type": { + "properties": { + "message": { + "type": "string" + } + } + } + } +}' +-------------------------------------------------- + +Register a query in the percolator: + +[source,js] +-------------------------------------------------- +curl -XPUT 'localhost:9200/my-index/.percolator/1' -d '{ + "query" : { + "match" : { + "message" : "bonsai tree" + } + } +}' +-------------------------------------------------- + +Match a document to the registered percolator queries: + +[source,js] +-------------------------------------------------- +curl -XGET 'localhost:9200/my-index/_search' -d '{ + "query" : { + "percolator" : { + "document_type" : "my-type", + "document" : { + "message" : "A new bonsai tree in the office" + } + } + } +}' +-------------------------------------------------- + +The above request will yield the following response: + +[source,js] +-------------------------------------------------- +{ + "took": 5, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "failed": 0 + }, + "hits": { + "total": 1, + "max_score": 0, + "hits": [ + { <1> + "_index": "my-index", + "_type": ".percolator", + "_id": "1", + "_score": 0, + "_source": { + "query": { + "match": { + "message": "bonsai tree" + } + } + } + } + ] + } +} +-------------------------------------------------- + +<1> The percolate query with id `1` matches our document. + +[float] +=== Indexing Percolator Queries + +Percolate queries are stored as documents in a specific format and in an arbitrary index under a reserved type with the +name `.percolator`. The query itself is placed as is in a JSON object under the top level field `query`. 
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {
+        "match" : {
+            "field" : "value"
+        }
+    }
+}
+--------------------------------------------------
+
+Since this is just an ordinary document, any field can be added. This can be
+useful later on to percolate documents against specific queries only.
+
+[source,js]
+--------------------------------------------------
+{
+    "query" : {
+        "match" : {
+            "field" : "value"
+        }
+    },
+    "priority" : "high"
+}
+--------------------------------------------------
+
+Just as with any other type, the `.percolator` type has a mapping, which you can configure via the mappings APIs.
+The default percolator mapping doesn't index the `query` field, it only stores it. By default the following mapping
+is active:
+
+[source,js]
+--------------------------------------------------
+{
+    ".percolator" : {
+        "properties" : {
+            "query" : {
+                "type" : "percolator"
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+If needed, this mapping can be modified with the update mapping API.
+
+In order to un-register a percolate query, the delete API can be used. So if the previously added query needs to be
+deleted, the following delete request needs to be executed:
+
+[source,js]
+--------------------------------------------------
+curl -XDELETE localhost:9200/my-index/.percolator/1
+--------------------------------------------------
+
+[float]
+==== Parameters
+
+The following parameters are required when percolating a document:
+
+[horizontal]
+`document_type`:: The type / mapping of the document being percolated. This parameter is always required.
+`document`:: The source of the document being percolated.
+
+Instead of specifying the source of the document being percolated, the source can also be retrieved from an already
+stored document. The `percolator` query will then internally execute a get request to fetch that document.
+
+In that case the `document` parameter can be substituted with the following parameters:
+
+[horizontal]
+`index`:: The index the document resides in. This is a required parameter.
+`type`:: The type of the document to fetch. This is a required parameter.
+`id`:: The id of the document to fetch. This is a required parameter.
+`routing`:: Optionally, the routing to be used to fetch the document to percolate.
+`preference`:: Optionally, the preference to be used to fetch the document to percolate.
+`version`:: Optionally, the expected version of the document to be fetched.
+
+[float]
+==== Dedicated Percolator Index
+
+Percolate queries can be added to any index. Instead of adding percolate queries to the index the data resides in,
+these queries can also be added to a dedicated index. The advantage of this is that this dedicated percolator index
+can have its own index settings (for example, the number of primary and replica shards). If you choose to have a dedicated
+percolate index, you need to make sure that the mappings from the normal index are also available on the percolate index.
+Otherwise percolate queries can be parsed incorrectly.
+
+[float]
+==== Percolating an Existing Document
+
+In order to percolate a newly indexed document, the `percolator` query can be used. Based on the response
+from an index request, the `_id` and other meta information can be used to immediately percolate the newly added
+document.
+
+[float]
+===== Example
+
+This builds on the previous example.
+
+Index the document we want to percolate:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT "http://localhost:9200/my-index/message/1" -d'
+{
+  "message" : "A new bonsai tree in the office"
+}'
+--------------------------------------------------
+
+Index response:
+
+[source,js]
+--------------------------------------------------
+{
+  "_index": "my-index",
+  "_type": "message",
+  "_id": "1",
+  "_version": 1,
+  "_shards": {
+    "total": 2,
+    "successful": 1,
+    "failed": 0
+  },
+  "created": true
+}
+--------------------------------------------------
+
+Percolating an existing document, using the index response as the basis to build a new search request:
+
+[source,js]
+--------------------------------------------------
+curl -XGET "http://localhost:9200/my-index/_search" -d'
+{
+  "query" : {
+    "percolator" : {
+      "document_type" : "my-type",
+      "index" : "my-index",
+      "type" : "message",
+      "id" : "1",
+      "version" : 1 <1>
+    }
+  }
+}'
+--------------------------------------------------
+
+<1> The version is optional, but useful in certain cases. We can then ensure that we are trying to percolate
+the document we have just indexed. If the document was changed after we indexed it, the search request would
+fail with a version conflict error.
+
+The search response returned is identical to the one in the previous example.
+
+[float]
+==== Percolator and highlighting
+
+The percolator query is handled in a special way when it comes to highlighting. The hits of the percolator queries
+are used to highlight the document that is provided in the `percolator` query, whereas with regular highlighting the
+query in the search request is used to highlight the hits.
+
+[float]
+===== Example
+
+This example is based on the mapping of the first example.
+
+Add a percolator query:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT "http://localhost:9200/my-index/.percolator/1" -d'
+{
+  "query" : {
+    "match" : {
+      "message" : "brown fox"
+    }
+  }
+}'
+--------------------------------------------------
+
+Add another percolator query:
+
+[source,js]
+--------------------------------------------------
+curl -XPUT "http://localhost:9200/my-index/.percolator/2" -d'
+{
+  "query" : {
+    "match" : {
+      "message" : "lazy dog"
+    }
+  }
+}'
+--------------------------------------------------
+
+Execute a search request with `percolator` and highlighting enabled:
+
+[source,js]
+--------------------------------------------------
+curl -XGET "http://localhost:9200/my-index/_search" -d'
+{
+  "query" : {
+    "percolator" : {
+      "document_type" : "my-type",
+      "document" : {
+        "message" : "The quick brown fox jumps over the lazy dog"
+      }
+    }
+  },
+  "highlight": {
+    "fields": {
+      "message": {}
+    }
+  }
+}'
+--------------------------------------------------
+
+This will yield the following response.
+
+[source,js]
+--------------------------------------------------
+{
+  "took": 14,
+  "timed_out": false,
+  "_shards": {
+    "total": 5,
+    "successful": 5,
+    "failed": 0
+  },
+  "hits": {
+    "total": 2,
+    "max_score": 0,
+    "hits": [
+      {
+        "_index": "my-index",
+        "_type": ".percolator",
+        "_id": "2",
+        "_score": 0,
+        "_source": {
+          "query": {
+            "match": {
+              "message": "lazy dog"
+            }
+          }
+        },
+        "highlight": {
+          "message": [
+            "The quick brown fox jumps over the lazy dog" <1>
+          ]
+        }
+      },
+      {
+        "_index": "my-index",
+        "_type": ".percolator",
+        "_id": "1",
+        "_score": 0,
+        "_source": {
+          "query": {
+            "match": {
+              "message": "brown fox"
+            }
+          }
+        },
+        "highlight": {
+          "message": [
+            "The quick brown fox jumps over the lazy dog" <1>
+          ]
+        }
+      }
+    ]
+  }
+}
+--------------------------------------------------
+
+<1> Instead of the query in the search request highlighting the percolator hits, the percolator queries are highlighting
+    the document defined in the `percolator` query.
+
+[float]
+==== How it Works Under the Hood
+
+When a document containing a query is indexed under the `.percolator` type, the query part of the document gets
+parsed into a Lucene query and is kept in memory until that percolator document is removed or the index containing the
+`.percolator` type gets removed. So, all the active percolator queries are kept in memory.
+
+At search time, the document specified in the request gets parsed into a Lucene document and is stored in an in-memory
+Lucene index. This in-memory index holds just this one document and is optimized for that. Then all the queries
+registered to the index that the search request targets are executed on this single-document in-memory index.
+This happens on each shard the search request needs to execute on.
+
+By using `routing` or additional queries, the number of percolator queries that need to be executed can be reduced and thus
+the time the search API needs to run can be decreased.
+
+[float]
+==== Important Notes
+
+Because the percolator query processes one document at a time, it doesn't support queries and filters that run
+against child documents such as `has_child` and `has_parent`.
+
+The percolator doesn't work with queries like the `template` and `geo_shape` queries when these queries fetch documents
+to substitute parts of the query. The reason is that the percolator stores the query terms during indexing in order to
+speed up percolation in certain cases, and this doesn't work if part of the query is defined in another document.
+There is no way for the percolator to know if an external document has changed, and even if there were, the
+percolator query would have to be reindexed.
+
+The `wildcard` and `regexp` queries use a lot of memory by nature, and because the percolator keeps the queries in memory
+this can easily use up the available heap space. If possible, try to use a `prefix` query or ngramming to
+achieve the same result (with far less memory being used).
+
+[float]
+==== Forcing Unmapped Fields to be Handled as Strings
+
+In certain cases it is unknown what kind of percolator queries will be registered, and if no field mapping exists for fields
+that are referred to by percolator queries then adding a percolator query fails. This means the mapping needs to be updated
+to include the field with the appropriate settings before the percolator query can be added. But sometimes it is sufficient
+if all unmapped fields are handled as if these were default string fields.
In those cases one can configure the
+`index.percolator.map_unmapped_fields_as_string` setting to `true` (defaults to `false`); then, if a field referred to in
+a percolator query does not exist, it will be handled as a default string field so that adding the percolator query doesn't
+fail.
\ No newline at end of file
diff --git a/docs/reference/query-dsl/regexp-syntax.asciidoc b/docs/reference/query-dsl/regexp-syntax.asciidoc
index e57d0e1c779..68ca5912458 100644
--- a/docs/reference/query-dsl/regexp-syntax.asciidoc
+++ b/docs/reference/query-dsl/regexp-syntax.asciidoc
@@ -220,12 +220,20 @@ Complement::
--
The complement is probably the most useful option. The shortest pattern that
-follows a tilde `"~"` is negated. For the string `"abcdef"`:
+follows a tilde `"~"` is negated. For instance, `"ab~cd"` means:
+
+* Starts with `a`
+* Followed by `b`
+* Followed by a string of any length that is anything but `c`
+* Ends with `d`
+
+For the string `"abcdef"`:
 
     ab~df  # match
-    ab~cf  # no match
-    a~(cd)f  # match
-    a~(bc)f  # no match
+    ab~cf  # match
+    ab~cdef  # no match
+    a~(cb)def  # match
+    a~(bc)def  # no match
 
 Enabled with the `COMPLEMENT` or `ALL` flags.
diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc
index 1a2d63d2265..16d0020bf10 100644
--- a/docs/reference/query-dsl/special-queries.asciidoc
+++ b/docs/reference/query-dsl/special-queries.asciidoc
@@ -20,6 +20,10 @@ final query to execute. This query allows a script to act as a filter. Also see
 the <>.
 
+<>::
+
+This query finds queries that are stored as documents and that match the
+specified document.
 
 include::mlt-query.asciidoc[]
 
@@ -27,3 +31,5 @@ include::template-query.asciidoc[]
 
 include::script-query.asciidoc[]
 
+include::percolator-query.asciidoc[]
+
diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc
index 267525b1b3c..f9391cece06 100644
--- a/docs/reference/release-notes.asciidoc
+++ b/docs/reference/release-notes.asciidoc
@@ -3,5 +3,11 @@
 [partintro]
 --
-This section will summarize the changes in released versions.
+This section summarizes the changes in each release.
+
+* <>
+* <>
+
 --
+include::release-notes/5.0.0-alpha1.asciidoc[]
+include::release-notes/5.0.0-alpha1-2x.asciidoc[]
diff --git a/docs/reference/release-notes/5.0.0-alpha1-2x.asciidoc b/docs/reference/release-notes/5.0.0-alpha1-2x.asciidoc
new file mode 100644
index 00000000000..061f3ae4839
--- /dev/null
+++ b/docs/reference/release-notes/5.0.0-alpha1-2x.asciidoc
@@ -0,0 +1,600 @@
+[[release-notes-5.0.0-alpha1-2x]]
+== 5.0.0-alpha1 Release Notes (Changes previously released in 2.x)
+
+The changes listed below were first released in the 2.x series. Changes
+released for the first time in Elasticsearch 5.0.0-alpha1 are listed in
+<>.
+ + + +[[breaking-5.0.0-alpha1-2x]] +[float] +=== Breaking changes + +Allocation:: +* Speed up shard balancer by reusing shard model while moving shards that can no longer be allocated to a node {pull}16926[#16926] + +Index APIs:: +* Change Field stats API response to include both number and string based min and max values {pull}14674[#14674] (issue: {issue}14404[#14404]) +* Add Force Merge API, deprecate Optimize API {pull}13778[#13778] + +Internal:: +* Forbid changing thread pool types {pull}14367[#14367] (issues: {issue}14294[#14294], {issue}2509[#2509], {issue}2858[#2858], {issue}5152[#5152]) + +Logging:: +* Log cluster health status changes {pull}14557[#14557] (issue: {issue}11657[#11657]) + +Mapping:: +* Add per-index setting to limit number of nested fields {pull}15989[#15989] (issue: {issue}14983[#14983]) + +Nested Docs:: +* If sorting by nested field then the `nested_path` should always be specified {pull}13429[#13429] (issue: {issue}13420[#13420]) + +Scripting:: +* Filter classes loaded by scripts {pull}15262[#15262] +* Lock down javascript and python script engines better {pull}13924[#13924] + +Search:: +* Limit the size of the result window to a dynamic property {pull}13188[#13188] (issue: {issue}9311[#9311]) + + + +[[feature-5.0.0-alpha1-2x]] +[float] +=== New features + +Aggregations:: +* Adds geo_centroid metric aggregator {pull}13846[#13846] (issue: {issue}13621[#13621]) +* Add `percentiles_bucket` pipeline aggregation {pull}13186[#13186] +* Add `stats_bucket` / `extended_stats_bucket` pipeline aggs {pull}13128[#13128] + +Geo:: +* Add CONTAINS relation to geo_shape query {pull}14810[#14810] (issue: {issue}14713[#14713]) +* Add support for Lucene 5.4 GeoPoint queries {pull}14537[#14537] +* Add GeoPointV2 Field Mapping {pull}14536[#14536] + +Network:: +* Allow binding to multiple addresses. 
{pull}13954[#13954] (issue: {issue}13592[#13592]) + +Plugin Analysis Phonetic:: +* Add support for `daitch_mokotoff` {pull}14834[#14834] + +Plugin Repository S3:: +* Add support for S3 storage class {pull}13656[#13656] (issue: {issue}13655[#13655]) + +Plugins:: +* Decentralize plugin security {pull}14108[#14108] + +Search:: +* Add query profiler {pull}14889[#14889] (issues: {issue}12974[#12974], {issue}6699[#6699]) + + + +[[enhancement-5.0.0-alpha1-2x]] +[float] +=== Enhancements + +Aliases:: +* Add support to _aliases endpoint to specify multiple indices and aliases in one action {pull}15305[#15305] (issue: {issue}15186[#15186]) + +Allocation:: +* Skip capturing least/most FS info for an FS with no total {pull}16001[#16001] (issue: {issue}15919[#15919]) +* Speed improvements for BalancedShardsAllocator {pull}15678[#15678] (issue: {issue}6372[#6372]) +* Simplify delayed shard allocation {pull}14808[#14808] +* Add cluster-wide setting for total shard limit {pull}14563[#14563] (issue: {issue}14456[#14456]) +* Early terminate high disk watermark checks on single data node cluster {pull}13882[#13882] (issue: {issue}9391[#9391]) +* Also use PriorityComparator in shard balancer {pull}13256[#13256] (issue: {issue}13249[#13249]) +* Add support for filtering by publish IP address {pull}8801[#8801] + +Analysis:: +* Add detail response support for _analyze API {pull}11660[#11660] (issue: {issue}11076[#11076]) + +CAT API:: +* Add sync_id to cat shards API {pull}14712[#14712] (issue: {issue}14705[#14705]) +* Add duration field to /_cat/snapshots {pull}14385[#14385] +* Add cat API for repositories and snapshots {pull}14247[#14247] (issue: {issue}13919[#13919]) +* Adds disk used by indices to _cat/allocation {pull}13783[#13783] (issue: {issue}13529[#13529]) + +CRUD:: + +Cluster:: +* Shard state action request logging {pull}16396[#16396] +* Safe cluster state task notifications {pull}15777[#15777] +* Reroute once per batch of shard failures {pull}15510[#15510] +* Add callback for publication of new cluster state {pull}15494[#15494] (issue: {issue}15482[#15482]) +* Use general cluster state batching mechanism for shard started {pull}15023[#15023] (issues: {issue}14725[#14725], {issue}14899[#14899]) +* Use general cluster state batching mechanism for shard failures {pull}15016[#15016] (issues: {issue}14725[#14725], {issue}14899[#14899]) +* Set an newly created IndexShard's ShardRouting before exposing it to operations {pull}14918[#14918] (issue: {issue}10708[#10708]) +* Uniform exceptions for TransportMasterNodeAction {pull}14737[#14737] + +Core:: +* Remove log4j exception hiding {pull}16834[#16834] +* Avoid cloning MessageDigest instances {pull}16479[#16479] +* Add a hard check to ensure we are running with the expected lucene version {pull}16305[#16305] (issue: {issue}16301[#16301]) +* If we can't get a MAC address for the node, use a dummy one {pull}15266[#15266] (issue: {issue}10099[#10099]) +* Simplify shard inactive logging {pull}15259[#15259] (issue: {issue}15252[#15252]) +* Simplify IndexingMemoryController#checkIdle {pull}15252[#15252] (issue: {issue}15251[#15251]) +* IndexingMemoryController should not track shard index states {pull}15251[#15251] (issues: {issue}13918[#13918], {issue}15225[#15225]) +* Verify Checksum once it has been fully written to fail as soon as possible {pull}13896[#13896] + +Discovery:: +* Don't allow nodes with missing custom meta data to join cluster {pull}15401[#15401] (issue: {issue}13445[#13445]) + +Exceptions:: +* Added file name to exceptions when failing to read 
index state {pull}16850[#16850] (issue: {issue}16713[#16713]) +* Add Exception class name to message in `NotSerializableExceptionWrapper` {pull}16325[#16325] +* Deduplicate cause if already contained in shard failures {pull}14432[#14432] +* Give a better exception when running from freebsd jail without enforce_statfs=1 {pull}14135[#14135] (issue: {issue}12018[#12018]) +* Make root_cause of field conflicts more obvious {pull}13976[#13976] (issue: {issue}12839[#12839]) +* Use a dedicated id to serialize EsExceptions instead of it's class name. {pull}13629[#13629] + +Fielddata:: +* Update GeoPoint FieldData for GeoPointV2 {pull}14345[#14345] + +Geo:: +* Upgrade GeoPointField to use Lucene 5.5 PrefixEncoding {pull}16482[#16482] +* Geo: Fix toString() in GeoDistanceRangeQuery and GeoPolygonQuery {pull}15026[#15026] +* Enable GeoPointV2 with backward compatibility testing {pull}14667[#14667] (issues: {issue}10761[#10761], {issue}11159[#11159], {issue}9859[#9859]) +* Refactor Geo utilities to Lucene 5.4 {pull}14339[#14339] + +Index APIs:: +* Add option to disable closing indices {pull}14169[#14169] (issue: {issue}14168[#14168]) + +Index Templates:: +* Disallow index template pattern to be the same as an alias name {pull}15184[#15184] (issue: {issue}14842[#14842]) + +Internal:: +* Cleanup search sub transport actions and collapse o.e.action.search.type package into o.e.action.search {pull}16758[#16758] (issue: {issue}11710[#11710]) +* Simplify the Text API. {pull}15511[#15511] +* Simpler using compressed oops flag representation {pull}15509[#15509] (issue: {issue}15489[#15489]) +* Info on compressed ordinary object pointers {pull}15489[#15489] (issues: {issue}13187[#13187], {issue}455[#455]) +* Explicitly log cluster state update failures {pull}15428[#15428] (issues: {issue}14899[#14899], {issue}15016[#15016], {issue}15023[#15023]) +* Use transport service to handle RetryOnReplicaException to execute replica action on the current node {pull}15363[#15363] +* Make IndexShard operation be more explicit about whether they are expected to run on a primary or replica {pull}15282[#15282] +* Avoid trace logging allocations in TransportBroadcastByNodeAction {pull}15221[#15221] +* Only trace log shard not available exceptions {pull}14950[#14950] (issue: {issue}14927[#14927]) +* Transport options should be immutable {pull}14760[#14760] +* Fix dangling comma in ClusterBlock#toString {pull}14483[#14483] +* Improve some logging around master election and cluster state {pull}14481[#14481] +* Add System#exit(), Runtime#exit() and Runtime#halt() to forbidden APIs {pull}14473[#14473] (issue: {issue}12596[#12596]) +* Simplify XContent detection. {pull}14472[#14472] +* Add threadgroup isolation. {pull}14353[#14353] +* Cleanup plugin security {pull}14311[#14311] +* Add workaround for JDK-8014008 {pull}14274[#14274] +* Refactor retry logic for TransportMasterNodeAction {pull}14222[#14222] +* Remove MetaDataSerivce and it's semaphores {pull}14159[#14159] (issue: {issue}1296[#1296]) +* Cleanup IndexMetaData {pull}14119[#14119] +* Add SpecialPermission to guard exceptions to security policy. {pull}13854[#13854] +* Clean up scripting permissions. 
{pull}13844[#13844] +* Factor groovy out of core into lang-groovy {pull}13834[#13834] (issue: {issue}13725[#13725]) +* More helpful error message on parameter order {pull}13737[#13737] +* Factor expressions scripts out to lang-expression plugin {pull}13726[#13726] (issue: {issue}13725[#13725]) +* Cleanup InternalClusterInfoService {pull}13543[#13543] +* Remove and forbid use of com.google.common.base.Throwables {pull}13409[#13409] (issue: {issue}13224[#13224]) +* Remove cyclic dependencies between IndexService and FieldData / BitSet caches {pull}13381[#13381] +* Remove and forbid use of com.google.common.base.Objects {pull}13355[#13355] (issue: {issue}13224[#13224]) +* Enable indy (invokedynamic) compile flag for Groovy scripts by default {pull}8201[#8201] (issue: {issue}8184[#8184]) + +Java API:: +* Align handling of interrupts in BulkProcessor {pull}15527[#15527] (issue: {issue}14833[#14833]) +* BulkProcessor backs off exponentially by default {pull}15513[#15513] (issue: {issue}14829[#14829]) +* Reject refresh usage in bulk items when using and fix NPE when no source {pull}15082[#15082] (issue: {issue}7361[#7361]) +* BulkProcessor retries after request handling has been rejected due to a full thread pool {pull}14829[#14829] (issue: {issue}14620[#14620]) + +Logging:: +* Log suppressed stack traces under DEBUG {pull}16627[#16627] (issues: {issue}12991[#12991], {issue}15329[#15329], {issue}16622[#16622]) +* Add circuit breaker name to logging package {pull}14661[#14661] +* Move logging for the amount of free disk to TRACE {pull}14403[#14403] (issue: {issue}12843[#12843]) +* Map log-level 'trace' to JDK-Level 'FINEST' {pull}14234[#14234] + +Mapping:: +* Expose the reason why a mapping merge is issued. {pull}16059[#16059] (issue: {issue}15989[#15989]) +* Add sub-fields support to `bool` fields. {pull}15636[#15636] (issue: {issue}6587[#6587]) +* Improve cross-type dynamic mapping updates. {pull}15633[#15633] (issue: {issue}15568[#15568]) +* Make mapping updates more robust. {pull}15539[#15539] +* Make mapping serialization more robust. {pull}15480[#15480] +* Make mappings immutable. {pull}15313[#15313] (issue: {issue}9365[#9365]) +* Make MappedFieldType.checkTypeName part of MappedFieldType.checkCompatibility. {pull}15245[#15245] +* Register field mappers at the node level. {pull}14896[#14896] (issue: {issue}14828[#14828]) + +Network:: +* Provide better error message when an incompatible node connects to a node {pull}17182[#17182] (issue: {issue}17090[#17090]) +* Add additional fallback to http.publish_port and restrict fallback to transport.publish_port {pull}16626[#16626] (issue: {issue}14535[#14535]) +* only allow code to bind to the user's configured port numbers/ranges {pull}14549[#14549] +* Port of publishAddress should match port of corresponding boundAddress {pull}14535[#14535] (issues: {issue}14503[#14503], {issue}14513[#14513], {issue}14514[#14514]) + +Packaging:: +* Windows service: Use JAVA_HOME environment variable in registry {pull}16552[#16552] (issue: {issue}13521[#13521]) +* Default standard output to the journal in systemd {pull}16159[#16159] (issues: {issue}15315[#15315], {issue}16134[#16134]) +* Use egrep instead of grep -E for Solaris {pull}15755[#15755] (issue: {issue}15628[#15628]) +* punch thru symlinks when loading plugins/modules {pull}15311[#15311] +* set ActiveProcessLimit=1 on windows {pull}15055[#15055] +* set RLIMIT_NPROC = 0 on bsd/os X systems. 
{pull}15039[#15039] +* Drop ability to execute on Solaris {pull}14200[#14200] +* Nuke ES_CLASSPATH appending, JarHell fail on empty classpath elements {pull}13880[#13880] (issues: {issue}13812[#13812], {issue}13864[#13864]) +* improve seccomp syscall filtering {pull}13829[#13829] +* Block process execution with seccomp on linux/amd64 {pull}13753[#13753] +* Get lang-javascript, lang-python, securemock ready for script refactoring {pull}13695[#13695] +* Remove some bogus permissions only needed for tests. {pull}13620[#13620] +* Remove java.lang.reflect.ReflectPermission "suppressAccessChecks" {pull}13603[#13603] +* Remove JAVA_HOME detection from the debian init script {pull}13514[#13514] (issues: {issue}13403[#13403], {issue}9774[#9774]) + +Plugin Cloud GCE:: +* cloud-gce plugin should check `discovery.type` {pull}13809[#13809] (issue: {issue}13614[#13614]) +* Adding backoff from retries on GCE errors {pull}13671[#13671] (issue: {issue}13460[#13460]) + +Plugin Discovery EC2:: +* Add ap-northeast-2 (seoul) endpoints for EC2 discovery and S3 snapshots {pull}16167[#16167] (issue: {issue}16166[#16166]) +* Adding US-Gov-West {pull}14358[#14358] +* Improved building of disco nodes {pull}14155[#14155] + +Plugin Ingest Attachment:: +* Fix attachments plugins with docx {pull}17059[#17059] (issue: {issue}16864[#16864]) + +Plugin Repository Azure:: +* Add support for secondary azure storage account {pull}13779[#13779] (issue: {issue}13228[#13228]) + +Plugin Repository S3:: +* Add aws canned acl {pull}14297[#14297] (issue: {issue}14103[#14103]) +* Enable S3SignerType {pull}13360[#13360] (issue: {issue}13332[#13332]) + +Plugins:: +* Expose http.type setting, and collapse al(most all) modules relating to transport/http {pull}15434[#15434] (issue: {issue}14148[#14148]) +* Ban RuntimePermission("getClassLoader") {pull}15253[#15253] +* Add nicer error message when a plugin descriptor is missing {pull}15200[#15200] (issue: {issue}15197[#15197]) +* Don't be lenient in PluginService#processModule(Module) {pull}14306[#14306] +* Adds a validation for plugins script to check if java is set {pull}13633[#13633] (issue: {issue}13613[#13613]) +* Output plugin info only in verbose mode {pull}12908[#12908] (issue: {issue}12907[#12907]) + +Query DSL:: +* Allow CIDR notation in query string query {pull}14773[#14773] (issue: {issue}7464[#7464]) +* Internal: simplify filtered query conversion to lucene query {pull}13312[#13312] (issue: {issue}13272[#13272]) + +REST:: +* Make XContentGenerator.writeRaw* safer. 
{pull}15358[#15358] +* Filter path refactoring {pull}14390[#14390] (issues: {issue}10980[#10980], {issue}11560[#11560], {issue}13344[#13344]) + +Recovery:: +* Handle cancel exceptions on recovery target if the cancel comes from the source {pull}15309[#15309] +* Decouple routing and primary operation logic in TransportReplicationAction {pull}14852[#14852] + +Reindex API:: +* Implement helpful interfaces in reindex requests {pull}17032[#17032] +* Reindex should timeout if sub-requests timeout {pull}16962[#16962] +* Teach reindex to retry on rejection {pull}16556[#16556] (issue: {issue}16093[#16093]) + +Scripting:: +* Remove suppressAccessChecks permission for Groovy script plugin {pull}16839[#16839] (issue: {issue}16527[#16527]) +* Class permission for Groovy references {pull}16660[#16660] (issue: {issue}16657[#16657]) +* Scripting: Allow to get size of array in mustache {pull}16193[#16193] +* Enhancements to the mustache script engine {pull}15661[#15661] +* Add property permissions so groovy scripts can serialize json {pull}14500[#14500] (issue: {issue}14488[#14488]) +* Remove ScriptEngineService.unwrap. {pull}13958[#13958] +* Remove ScriptEngineService.execute. {pull}13956[#13956] + +Search:: +* Caching Weight wrappers should propagate the BulkScorer. {pull}14317[#14317] +* fix numerical issue in function score query {pull}14085[#14085] +* Optimize scrolls for constant-score queries. {pull}13311[#13311] + +Settings:: +* Log warning if max file descriptors too low {pull}16506[#16506] + +Snapshot/Restore:: +* Support wildcards for getting repositories and snapshots {pull}15151[#15151] (issue: {issue}4758[#4758]) +* Add ignore_unavailable parameter to skip unavailable snapshot {pull}14471[#14471] (issue: {issue}13887[#13887]) +* Simplify the BlobContainer blob writing interface {pull}13434[#13434] + +Stats:: +* Pull Fields instance once from LeafReader in completion stats {pull}15090[#15090] (issue: {issue}6593[#6593]) +* Add os.allocated_processors stats {pull}14409[#14409] (issue: {issue}13917[#13917]) +* Adds stats counter for failed indexing requests {pull}13130[#13130] (issue: {issue}8938[#8938]) + +Top Hits:: +* Put method addField on TopHitsBuilder {pull}14597[#14597] (issue: {issue}12962[#12962]) + +Translog:: +* Check for tragic event on all kinds of exceptions not only ACE and IOException {pull}15535[#15535] + +Tribe Node:: +* Tribe nodes should apply cluster state updates in batches {pull}14993[#14993] (issues: {issue}14725[#14725], {issue}14899[#14899]) + + + +[[bug-5.0.0-alpha1-2x]] +[float] +=== Bug fixes + +Aggregations:: +* Build empty extended stats aggregation if no docs collected for bucket {pull}16972[#16972] (issues: {issue}16812[#16812], {issue}9544[#9544]) +* Set meta data for pipeline aggregations {pull}16516[#16516] (issue: {issue}16484[#16484]) +* Filter(s) aggregation should create weights only once. {pull}15998[#15998] +* Make `missing` on terms aggs work with all execution modes. 
{pull}15746[#15746] (issue: {issue}14882[#14882]) +* Run pipeline aggregations for empty buckets added in the Range Aggregation {pull}15519[#15519] (issue: {issue}15471[#15471]) +* [Children agg] fix bug that prevented all child docs from being evaluated {pull}15457[#15457] +* Correct typo in class name of StatsAggregator {pull}15321[#15321] (issue: {issue}14730[#14730]) +* Fix significant terms reduce for long terms {pull}14948[#14948] (issue: {issue}13522[#13522]) +* Fix NPE in Derivative Pipeline when current bucket value is null {pull}14745[#14745] +* Pass extended bounds into HistogramAggregator when creating an unmapped aggregator {pull}14742[#14742] (issue: {issue}14735[#14735]) +* Added correct generic type parameter on ScriptedMetricBuilder {pull}14018[#14018] (issue: {issue}13986[#13986]) +* Pipeline Aggregations at the root of the agg tree are now validated {pull}13475[#13475] (issue: {issue}13179[#13179]) + +Aliases:: +* Fix _aliases filter and null parameters {pull}16553[#16553] (issues: {issue}16547[#16547], {issue}16549[#16549]) + +Allocation:: +* IndicesStore checks for `allocated elsewhere` for every shard not allocated on the local node {pull}17106[#17106] +* Prevent peer recovery from node with older version {pull}15775[#15775] +* Fix calculation of next delay for delayed shard allocation {pull}14765[#14765] +* Take ignored unallocated shards into account when making allocation decision {pull}14678[#14678] (issue: {issue}14670[#14670]) +* Only allow rebalance operations to run if all shard store data is available {pull}14591[#14591] (issue: {issue}14387[#14387]) +* Delayed allocation can miss a reroute {pull}14494[#14494] (issues: {issue}14010[#14010], {issue}14011[#14011], {issue}14445[#14445]) +* Check rebalancing constraints when shards are moved from a node they can no longer remain on {pull}14259[#14259] (issue: {issue}14057[#14057]) + +Analysis:: +* Analysis : Allow string explain param in JSON {pull}16977[#16977] (issue: {issue}16925[#16925]) +* Analysis : Fix no response from Analyze API without specified index {pull}15447[#15447] (issue: {issue}15148[#15148]) + +Bulk:: +* Bulk api: fail deletes when routing is required but not specified {pull}16675[#16675] (issues: {issue}10136[#10136], {issue}16645[#16645]) +* Do not release unacquired semaphore {pull}14909[#14909] (issue: {issue}14908[#14908]) + +CAT API:: +* Properly set indices and indicesOptions on subrequest made by /_cat/indices {pull}14360[#14360] + +CRUD:: +* Throw exception if content type could not be determined in Update API {pull}15904[#15904] (issue: {issue}15822[#15822]) +* Index name expressions should not be broken up {pull}13691[#13691] (issue: {issue}13665[#13665]) + +Cache:: +* Handle closed readers in ShardCoreKeyMap {pull}16027[#16027] + +Cluster:: +* Index deletes not applied when cluster UUID has changed {pull}16825[#16825] (issue: {issue}11665[#11665]) +* Only fail the relocation target when a replication request on it fails {pull}15791[#15791] (issue: {issue}15790[#15790]) +* Handle shards assigned to nodes that are not in the cluster state {pull}14586[#14586] (issue: {issue}14584[#14584]) +* Bulk cluster state updates on index deletion {pull}11258[#11258] (issue: {issue}7295[#7295]) + +Core:: +* BitSetFilterCache duplicates its content. 
{pull}15836[#15836] (issue: {issue}15820[#15820]) +* Limit the max size of bulk and index thread pools to bounded number of processors {pull}15585[#15585] (issue: {issue}15582[#15582]) +* AllTermQuery's scorer should skip segments that never saw the requested term {pull}15506[#15506] +* Include root-cause exception when we fail to change shard's index buffer {pull}14867[#14867] +* Restore thread interrupt flag after an InterruptedException {pull}14799[#14799] (issue: {issue}14798[#14798]) +* Record all bytes of the checksum in VerifyingIndexOutput {pull}13923[#13923] (issues: {issue}13848[#13848], {issue}13896[#13896]) +* When shard becomes active again, immediately increase its indexing buffer {pull}13918[#13918] (issue: {issue}13802[#13802]) +* Close TokenStream in finally clause {pull}13870[#13870] (issue: {issue}11947[#11947]) +* LoggingRunnable.run should catch and log all errors, not just Exception? {pull}13718[#13718] (issue: {issue}13487[#13487]) + +Exceptions:: +* Fix ensureNodesAreAvailable's error message {pull}14007[#14007] (issue: {issue}13957[#13957]) + +Expressions:: +* Check that _value is used in aggregations script before setting value to specialValue {pull}17091[#17091] (issue: {issue}14262[#14262]) + +Fielddata:: +* Don't cache top level field data for fields that don't exist {pull}14693[#14693] + +Geo:: +* Remove .geohash suffix from GeoDistanceQuery and GeoDistanceRangeQuery {pull}15871[#15871] (issue: {issue}15179[#15179]) +* Geo: Allow numeric parameters enclosed in quotes for 'geohash_grid' aggregation {pull}14440[#14440] (issue: {issue}13132[#13132]) +* Resync Geopoint hashCode/equals method {pull}14124[#14124] (issue: {issue}14083[#14083]) +* Fix GeoPointFieldMapper to index geohash at correct precision. {pull}13649[#13649] (issue: {issue}12467[#12467]) + +Highlighting:: +* Don't override originalQuery with request filters {pull}15793[#15793] (issue: {issue}15689[#15689]) +* Fix spans extraction to not also include individual terms. {pull}15516[#15516] (issues: {issue}13239[#13239], {issue}15291[#15291]) + +Index APIs:: +* Field stats: Index constraints should remove indices in the response if the field to evaluate is empty {pull}14868[#14868] +* Field stats: Fix NPE for index constraint on empty index {pull}14841[#14841] +* Field stats: Added `format` option for index constraints {pull}14823[#14823] (issue: {issue}14804[#14804]) +* Forbid index name `.` and `..` {pull}13862[#13862] (issue: {issue}13858[#13858]) + +Inner Hits:: +* Query and top level inner hit definitions shouldn't overwrite each other {pull}16222[#16222] (issue: {issue}16218[#16218]) + +Internal:: +* Log uncaught exceptions from scheduled once tasks {pull}15824[#15824] (issue: {issue}15814[#15814]) +* FunctionScoreQuery should implement two-phase iteration. 
{pull}15602[#15602] +* Make sure the remaining delay of unassigned shard is updated with every reroute {pull}14890[#14890] (issue: {issue}14808[#14808]) +* Throw a meaningful error when loading metadata and an alias and index have the same name {pull}14842[#14842] (issue: {issue}14706[#14706]) +* fixup issues with 32-bit jvm {pull}14609[#14609] +* Failure to update the cluster state with the recovered state should make sure it will be recovered later {pull}14485[#14485] +* Gateway: a race condition can prevent the initial cluster state from being recovered {pull}13997[#13997] +* Verify actually written checksum in VerifyingIndexOutput {pull}13848[#13848] +* An inactive shard is activated by triggered synced flush {pull}13802[#13802] +* Remove all setAccessible in tests and forbid {pull}13539[#13539] +* Remove easy uses of setAccessible in tests. {pull}13537[#13537] +* Ban setAccessible from core code, restore monitoring stats under java 9 {pull}13531[#13531] (issue: {issue}13527[#13527]) + +Logging:: +* Add missing index name to indexing slow log {pull}17026[#17026] (issue: {issue}17025[#17025]) +* ParseFieldMatcher should log when using deprecated settings. {pull}16988[#16988] +* Don't log multi-megabyte guice exceptions. {pull}13782[#13782] +* Moving system property setting to before it can be used {pull}13660[#13660] (issue: {issue}13658[#13658]) + +Mapping:: +* Put mapping operations must update metadata of all types. {pull}16264[#16264] (issue: {issue}16239[#16239]) +* Fix serialization of `search_analyzer`. {pull}16255[#16255] +* Reuse metadata mappers for dynamic updates. {pull}16023[#16023] (issue: {issue}15997[#15997]) +* Fix MapperService#searchFilter(...) {pull}15923[#15923] (issue: {issue}15757[#15757]) +* Fix initial sizing of BytesStreamOutput. {pull}15864[#15864] (issue: {issue}15789[#15789]) +* MetaDataMappingService should call MapperService.merge with the original mapping update. {pull}15508[#15508] +* MapperService: check index.mapper.dynamic during index creation {pull}15424[#15424] (issue: {issue}15381[#15381]) +* Only text fields should accept analyzer and term vector settings. {pull}15308[#15308] +* Mapper parsers should not check for a `tokenized` property. {pull}15289[#15289] +* Validate that fields are defined only once. {pull}15243[#15243] (issue: {issue}15057[#15057]) +* Check mapping compatibility up-front. {pull}15175[#15175] (issue: {issue}15049[#15049]) +* Don't treat _default_ as a regular type. {pull}15156[#15156] (issue: {issue}15049[#15049]) +* Don't ignore mapping merge failures. {pull}15144[#15144] (issue: {issue}15049[#15049]) +* Treat mappings at an index-level feature. {pull}15142[#15142] +* Make _type use doc values {pull}14783[#14783] (issue: {issue}14781[#14781]) + +Network:: +* Only accept transport requests after node is fully initialized {pull}16746[#16746] (issue: {issue}16723[#16723]) + +Packaging:: +* Fix waiting for pidfile {pull}16718[#16718] (issue: {issue}16717[#16717]) +* Fix Windows service installation failure {pull}15549[#15549] (issue: {issue}15349[#15349]) +* Enable es_include at init {pull}15173[#15173] +* Handle system policy correctly {pull}14704[#14704] (issue: {issue}14690[#14690]) +* Startup script exit status should catch daemonized startup failures {pull}14170[#14170] (issue: {issue}14163[#14163]) +* Don't let ubuntu try to install its crazy jayatana agent. 
{pull}13813[#13813] (issue: {issue}13785[#13785]) + +Parent/Child:: +* Check that parent_type in Has Parent Query has child types {pull}16923[#16923] (issue: {issue}16692[#16692]) +* Has child query forces default similarity {pull}16611[#16611] (issues: {issue}16550[#16550], {issue}4977[#4977]) + +Percolator:: +* Don't replace found fields if map unmapped fields as string is enabled {pull}16043[#16043] (issue: {issue}10500[#10500]) +* mpercolate api should serialise start time {pull}15938[#15938] (issue: {issue}15908[#15908]) + +Plugin Delete By Query:: +* Fix Delete-by-Query with Shield {pull}14658[#14658] (issue: {issue}14527[#14527]) + +Plugin Discovery GCE:: +* Add setFactory permission to GceDiscoveryPlugin {pull}16860[#16860] (issue: {issue}16485[#16485]) + +Plugin Mapper Attachment:: +* Fix toXContent() for mapper attachments field {pull}15110[#15110] + +Plugin Repository S3:: +* Hack around aws security hole of accessing sun.security.ssl, s3 repository works on java 9 again {pull}13538[#13538] (issue: {issue}432[#432]) + +Plugins:: +* Fix plugin list command error message {pull}14288[#14288] (issue: {issue}14287[#14287]) +* Fix HTML response during redirection {pull}11374[#11374] (issue: {issue}11370[#11370]) + +Query DSL:: +* Fix FunctionScore equals/hashCode to include minScore and friends {pull}15676[#15676] +* Min should match greater than the number of optional clauses should return no result {pull}15571[#15571] (issue: {issue}15521[#15521]) +* Return a better exception message when `regexp` query is used on a numeric field {pull}14910[#14910] (issue: {issue}14782[#14782]) + +REST:: +* Remove detect_noop from REST spec {pull}16386[#16386] +* Make text parsing less lenient. {pull}15679[#15679] +* Throw exception when trying to write map with null keys {pull}15479[#15479] (issue: {issue}14346[#14346]) +* Fix OOM in AbstractXContentParser {pull}15350[#15350] (issue: {issue}15338[#15338]) +* XContentFactory.xContentType: allow for possible UTF-8 BOM for JSON XContentType {pull}14611[#14611] (issue: {issue}14442[#14442]) +* RestUtils.decodeQueryString ignores the URI fragment when parsing a query string {pull}13365[#13365] (issue: {issue}13320[#13320]) + +Recovery:: +* Try to renew sync ID if `flush=true` on forceMerge {pull}17108[#17108] (issue: {issue}17019[#17019]) +* CancellableThreads should also treat ThreadInterruptedException as InterruptedException {pull}15318[#15318] + +Reindex API:: +* Properly register reindex status {pull}17125[#17125] +* Make search failure cause rest failure {pull}16889[#16889] (issue: {issue}16037[#16037]) + +Scripting:: +* Add permission to access sun.reflect.MethodAccessorImpl from Groovy scripts {pull}16540[#16540] (issue: {issue}16536[#16536]) +* Security permissions for Groovy closures {pull}16196[#16196] (issues: {issue}16194[#16194], {issue}248[#248]) + +Search:: +* Do not apply minimum_should_match on auto generated boolean query if the coordination factor is disabled. {pull}16155[#16155] +* Do not apply minimum-should-match on a boolean query if the coords are disabled {pull}16078[#16078] (issue: {issue}15858[#15858]) +* Fix blended terms take 2 {pull}15894[#15894] (issue: {issue}15860[#15860]) +* Fix NPE when a segment with an empty cache gets closed. 
{pull}15202[#15202] (issue: {issue}15043[#15043]) +* Fix the quotes in the explain message for a script score function without parameters {pull}11398[#11398] + +Settings:: +* TransportClient should use updated setting for initialization of modules and service {pull}16095[#16095] +* ByteSizeValue.equals should normalize units {pull}13784[#13784] + +Snapshot/Restore:: +* Prevent closing index during snapshot restore {pull}16933[#16933] (issue: {issue}16321[#16321]) +* Add node version check to shard allocation during restore {pull}16520[#16520] (issue: {issue}16519[#16519]) +* Snapshot restore and index creates should keep index settings and cluster blocks in sync {pull}13931[#13931] (issue: {issue}13213[#13213]) +* Fix blob size in writeBlob() method {pull}13574[#13574] (issue: {issue}13434[#13434]) + +Stats:: +* Fix recovery translog stats totals when recovering from store {pull}16493[#16493] (issue: {issue}15974[#15974]) +* Fix calculation of age of pending tasks {pull}15995[#15995] (issue: {issue}15988[#15988]) +* Add extra validation into `cluster/stats` {pull}14699[#14699] (issue: {issue}7390[#7390]) +* Omit current* stats for OldShardStats {pull}13801[#13801] (issue: {issue}13386[#13386]) + +Translog:: +* Never delete translog-N.tlog file when creation fails {pull}15788[#15788] +* Close recovered translog readers if createWriter fails {pull}15762[#15762] (issue: {issue}15754[#15754]) +* Fail and close translog hard if writing to disk fails {pull}15420[#15420] (issue: {issue}15333[#15333]) +* Prevent writing to closed channel if translog is already closed {pull}15012[#15012] (issue: {issue}14866[#14866]) +* Don't delete temp recovered checkpoint file if it was renamed {pull}14872[#14872] (issue: {issue}14695[#14695]) +* Translog recovery can repeatedly fail if we run out of disk {pull}14695[#14695] +* Pending operations in the translog prevent shard from being marked as inactive {pull}13759[#13759] (issue: {issue}13707[#13707]) + +Tribe Node:: +* Passthrough environment and network settings to tribe client nodes {pull}16893[#16893] +* Tribe node: pass path.conf to inner tribe clients {pull}16258[#16258] (issue: {issue}16253[#16253]) +* Fix tribe node to load config file for internal client nodes {pull}15300[#15300] (issues: {issue}13383[#13383], {issue}14573[#14573]) + + + +[[regression-5.0.0-alpha1-2x]] +[float] +=== Regressions + +Analysis:: +* Add PathHierarchy type back to path_hierarchy tokenizer for backward compatibility with 1.x {pull}15785[#15785] (issue: {issue}15756[#15756]) + +Internal:: +* Deduplicate concrete indices after indices resolution {pull}14316[#14316] (issues: {issue}11258[#11258], {issue}12058[#12058]) + +Plugin Cloud Azure:: +* Filter cloud azure credentials {pull}14863[#14863] (issues: {issue}13779[#13779], {issue}14843[#14843]) + +REST:: +* Don't return all indices immediately if count of expressions >1 and first expression is * {pull}17033[#17033] (issue: {issue}17027[#17027]) + + + +[[upgrade-5.0.0-alpha1-2x]] +[float] +=== Upgrades + +Core:: +* Upgrade to Lucene 5.5.0 official release {pull}16742[#16742] +* Upgrade to lucene 5.5.0-snapshot-850c6c2 {pull}16615[#16615] +* Upgrade to lucene 5.5.0-snapshot-4de5f1d {pull}16400[#16400] (issues: {issue}16373[#16373], {issue}16399[#16399]) +* Update lucene to r1725675 {pull}16114[#16114] +* Upgrade to lucene-5.5.0-snapshot-1721183. 
{pull}15575[#15575] +* Upgrade Lucene to 5.4.0-snapshot-1715952 {pull}14951[#14951] +* Upgrade Lucene to 5.4.0-snapshot-1714615 {pull}14784[#14784] +* Upgrade to lucene-5.4.0-snapshot-1712973. {pull}14619[#14619] +* update to lucene-5.4.x-snapshot-1711508 {pull}14398[#14398] +* Upgrade to lucene-5.4-snapshot-1710880. {pull}14320[#14320] +* Upgrade to lucene-5.4-snapshot-1708254. {pull}14074[#14074] +* upgrade lucene to r1702265 {pull}13439[#13439] +* Upgrade master to lucene 5.4-snapshot r1701068 {pull}13324[#13324] + +Geo:: +* Update to spatial4j 0.5 for correct Multi-Geometry {pull}14269[#14269] (issue: {issue}9904[#9904]) + +Internal:: +* Update to Jackson 2.6.2 {pull}13344[#13344] (issues: {issue}10980[#10980], {issue}207[#207], {issue}213[#213]) + +Plugin Cloud AWS:: +* Update AWS SDK version to 1.10.19 {pull}13655[#13655] (issue: {issue}13656[#13656]) + +Plugin Cloud Azure:: +* Update Azure Service Management API to 0.9.0 {pull}15232[#15232] (issue: {issue}15209[#15209]) + +Plugin Discovery Azure:: +* Upgrade azure SDK to 0.9.3 {pull}17102[#17102] (issues: {issue}17042[#17042], {issue}557[#557]) + +Plugin Lang JS:: +* upgrade rhino for plugins/lang-javascript {pull}14466[#14466] + +Plugin Repository Azure:: +* Upgrade Azure Storage client to 4.0.0 {pull}16084[#16084] (issues: {issue}12567[#12567], {issue}15080[#15080], {issue}15976[#15976]) + +Plugin Repository S3:: +* Upgrade to aws 1.10.33 {pull}14672[#14672] + +Scripting:: +* Upgrade groovy dependency in lang-groovy module to version 2.4.6 {pull}16830[#16830] (issue: {issue}16527[#16527]) + + + diff --git a/docs/reference/release-notes/5.0.0-alpha1.asciidoc b/docs/reference/release-notes/5.0.0-alpha1.asciidoc new file mode 100644 index 00000000000..eac01a915e5 --- /dev/null +++ b/docs/reference/release-notes/5.0.0-alpha1.asciidoc @@ -0,0 +1,688 @@ +[[release-notes-5.0.0-alpha1]] +== 5.0.0-alpha1 Release Notes + +The changes listed below have been released for the first time in +Elasticsearch 5.0.0-alpha1. Changes in this release which were first released +in the 2.x series are listed in <>. + +[[breaking-5.0.0-alpha1]] +[float] +=== Breaking changes + +Aggregations:: +* getKeyAsString and key_as_string should be the same for terms aggregation on boolean field {pull}15393[#15393] + +Aliases:: +* make get alias expand to open and closed indices by default {pull}15954[#15954] (issue: {issue}14982[#14982]) + +Allocation:: +* Simplify shard balancer interface {pull}17028[#17028] (issue: {issue}8954[#8954]) +* Remove DisableAllocationDecider {pull}13313[#13313] + +CAT API:: +* Add raw recovery progress to cat recovery API {pull}17064[#17064] (issue: {issue}17022[#17022]) +* Remove host from cat nodes API {pull}16656[#16656] (issues: {issue}12959[#12959], {issue}16575[#16575]) +* Using the accept header in the request instead of content-type in _cat API. {pull}14421[#14421] (issue: {issue}14195[#14195]) + +CRUD:: +* Remove object notation for core types. {pull}15684[#15684] (issue: {issue}15388[#15388]) + +Cache:: +* Refactor IndicesRequestCache to make it testable. 
{pull}16610[#16610] +* Remove deprecated query cache settings {pull}15592[#15592] + +Core:: +* Bootstrap does not set system properties {pull}17088[#17088] (issues: {issue}16579[#16579], {issue}16791[#16791]) +* Add max number of processes check {pull}16919[#16919] +* Add mlockall bootstrap check {pull}16909[#16909] +* Remove es.useLinkedTransferQueue {pull}16786[#16786] +* One log {pull}16703[#16703] (issue: {issue}16585[#16585]) + +Engine:: +* Remove `index.compound_on_flush` setting and default to `true` {pull}15594[#15594] (issue: {issue}10778[#10778]) + +Fielddata:: +* Remove "uninverted" and "binary" fielddata support for numeric and boolean fields. {pull}14082[#14082] + +Index APIs:: +* Remove `GET` option for /_forcemerge {pull}15223[#15223] (issue: {issue}15165[#15165]) +* Remove /_optimize REST API endpoint {pull}14226[#14226] (issue: {issue}13778[#13778]) + +Internal:: +* Cli: Switch to jopt-simple {pull}17024[#17024] (issue: {issue}11564[#11564]) +* Replace ContextAndHeaders with a ThreadPool based ThreadLocal implementation {pull}15776[#15776] +* Remove NodeBuilder {pull}15354[#15354] +* Fix IndexSearcherWrapper interface to not depend on the EngineConfig {pull}14654[#14654] +* Cleanup query parsing and remove IndexQueryParserService {pull}14452[#14452] +* Fold IndexCacheModule into IndexModule {pull}14293[#14293] +* Remove circular dependency between IndicesService and IndicesStore {pull}14285[#14285] +* Remove guice injection from IndexStore and friends {pull}14279[#14279] +* Simplify similarity module and friends {pull}13942[#13942] +* Remove shard-level injector {pull}13881[#13881] +* Refactor SearchRequest to be parsed on the coordinating node {pull}13859[#13859] +* Remove support for pre 2.0 indices {pull}13799[#13799] + +Java API:: +* Remove the count api {pull}14166[#14166] (issue: {issue}13928[#13928]) +* IdsQueryBuilder to accept only non null ids and types {pull}13937[#13937] + +Mapping:: +* Change the field mapping index time boost into a query time boost. {pull}16900[#16900] +* Deprecate string in favor of text/keyword. {pull}16877[#16877] +* [Mapping] Several MappingService cleanups {pull}16133[#16133] (issue: {issue}15924[#15924]) +* [Mapping] Cleanup ParentFieldMapper: {pull}16045[#16045] +* Remove the `format` option of the `_source` field. {pull}15398[#15398] +* Remove transform {pull}13657[#13657] (issue: {issue}12674[#12674]) + +Network:: +* Remove ability to disable Netty gathering writes {pull}16774[#16774] (issue: {issue}7811[#7811]) + +Parent/Child:: +* Removed `total` score mode in favour for `sum` score mode. 
{pull}17174[#17174] (issues: {issue}13470[#13470], {issue}17083[#17083]) +* Several other parent/child cleanups {pull}13470[#13470] +* Removed pre 2.x parent child implementation {pull}13376[#13376] + +Percolator:: +* Change the percolate api to not dynamically add fields to mapping {pull}16077[#16077] (issue: {issue}15751[#15751]) + +Plugins:: +* Rename bin/plugin in bin/elasticsearch-plugin {pull}16454[#16454] +* Change the inner structure of the plugins zip {pull}16453[#16453] +* Remove multicast plugin {pull}16326[#16326] (issue: {issue}16310[#16310]) +* Plugins: Remove site plugins {pull}16038[#16038] +* Don't use guice for QueryParsers {pull}15761[#15761] +* Remove guice from the index level {pull}14518[#14518] +* Simplify Analysis registration and configuration {pull}14355[#14355] +* Replace IndicesLifecycle with a per-index IndexEventListener {pull}14217[#14217] (issue: {issue}13259[#13259]) + +Query DSL:: +* Remove the MissingQueryBuilder which was deprecated in 2.2.0. {pull}15364[#15364] (issue: {issue}14112[#14112]) +* Remove NotQueryBuilder {pull}14204[#14204] (issue: {issue}13761[#13761]) +* Function score query: remove deprecated support for boost_factor {pull}13510[#13510] +* Remove support for deprecated queries. {pull}13418[#13418] (issue: {issue}13326[#13326]) + +REST:: +* Limit the accepted length of the _id {pull}16036[#16036] (issue: {issue}16034[#16034]) + +Scripting:: +* Script settings {pull}16197[#16197] + +Search:: +* Remove some deprecations {pull}14331[#14331] +* Remove search exists api {pull}13911[#13911] (issues: {issue}13682[#13682], {issue}13910[#13910]) +* Query refactoring: split parse phase into fromXContent and toQuery for all queries {pull}13788[#13788] (issue: {issue}10217[#10217]) +* Remove the scan and count search types. {pull}13310[#13310] + +Search Refactoring:: +* Remove deprecated parameter from field sort builder. 
{pull}16573[#16573] (issue: {issue}16127[#16127]) +* Remove support for query_binary and filter_binary {pull}14433[#14433] (issue: {issue}14308[#14308]) +* Validate query api: move query parsing to the coordinating node {pull}14384[#14384] +* Remove "query" query and fix related parsing bugs {pull}14304[#14304] (issue: {issue}13326[#13326]) + +Settings:: +* Prevent index level setting from being configured on a node level {pull}17144[#17144] (issue: {issue}16799[#16799]) +* Remove es.max-open-files flag {pull}16757[#16757] (issues: {issue}16506[#16506], {issue}483[#483]) +* Enforce node level limits if node is started in production env {pull}16733[#16733] (issue: {issue}16727[#16727]) +* Move remaining settings in NettyHttpServerTransport to the new infra {pull}16531[#16531] +* Make settings validation strict {pull}16365[#16365] +* Remove the ability to fsync on every operation and only schedule fsync task if really needed {pull}16257[#16257] (issue: {issue}16152[#16152]) +* Remove index.flush_on_close entirely {pull}15977[#15977] +* Restore chunksize of 512kb on recovery and remove configurability {pull}15235[#15235] (issue: {issue}15161[#15161]) +* Remove ancient deprecated and alternative recovery settings {pull}15234[#15234] +* Replace IndexSettings annotation with a full-fledged class {pull}14251[#14251] +* Fix ping timeout settings inconsistencies {pull}13701[#13701] (issue: {issue}6579[#6579]) + +Similarities:: +* Renames `default` similarity into `classic` {pull}15446[#15446] (issue: {issue}15102[#15102]) + +Snapshot/Restore:: +* Fail closing or deleting indices during a full snapshot {pull}17021[#17021] (issue: {issue}16321[#16321]) + +Stats:: +* Modify load average format {pull}15932[#15932] (issue: {issue}15907[#15907]) +* Reintroduce five-minute and fifteen-minute load averages on Linux {pull}15907[#15907] (issues: {issue}12049[#12049], {issue}14741[#14741]) +* Add system CPU percent to OS stats {pull}14741[#14741] + +Store:: +* Standardize state format type for global and index level metadata {pull}17123[#17123] + +Term Vectors:: +* Remove DFS support from TermVector API {pull}16452[#16452] +* Term vector APIs should no longer update mappings {pull}16285[#16285] + +Translog:: +* Drop support for simple translog and hard-wire buffer to 8kb {pull}15574[#15574] +* Simplify translog-based flush settings {pull}15573[#15573] + +Warmers:: +* Remove query warmers and the warmer API. {pull}15614[#15614] (issue: {issue}15607[#15607]) + + + +[[deprecation-5.0.0-alpha1]] +[float] +=== Deprecations + +Plugin Mapper Attachment:: +* Deprecate mapper-attachments plugin {pull}16948[#16948] (issue: {issue}16910[#16910]) + +Search:: +* Deprecate fuzzy query {pull}16211[#16211] (issues: {issue}15760[#15760], {issue}16121[#16121]) + + + +[[feature-5.0.0-alpha1]] +[float] +=== New features + +Discovery:: +* Add two phased commit to Cluster State publishing {pull}13062[#13062] + +Ingest:: +* Merge feature/ingest branch into master branch {pull}16049[#16049] (issue: {issue}14049[#14049]) + +Mapping:: +* Add a text field. {pull}16637[#16637] +* Add a new `keyword` field. 
{pull}16589[#16589] + +Percolator:: +* index the query terms from the percolator query {pull}13646[#13646] (issue: {issue}12664[#12664]) + +Plugin Ingest Attachment:: +* Ingest: Add attachment processor {pull}16490[#16490] (issue: {issue}16303[#16303]) + +Plugin Mapper Attachment:: +* Migrate mapper attachments plugin to main repository {pull}14605[#14605] + +Plugin Repository HDFS:: +* HDFS Snapshot/Restore plugin {pull}15192[#15192] (issue: {issue}15191[#15191]) + +Query DSL:: +* Adds a rewrite phase to queries on the shard level {pull}16870[#16870] (issue: {issue}9526[#9526]) + +Reindex API:: +* Merge reindex to master {pull}16861[#16861] + +Scripting:: +* Exceptions and Infinite Loop Checking {pull}15936[#15936] +* Added a new scripting language (PlanA) {pull}15136[#15136] (issue: {issue}13084[#13084]) + +Search:: +* Add `search_after` parameter in the SearchAPI {pull}16125[#16125] (issue: {issue}8192[#8192]) + +Settings:: +* Add infrastructure to transactionally apply and reset dynamic settings {pull}15278[#15278] + +Stats:: +* API for listing index file sizes {pull}16661[#16661] (issue: {issue}16131[#16131]) + +Suggesters:: +* Add document-oriented completion suggester {pull}14410[#14410] (issue: {issue}10746[#10746]) + +Task Manager:: +* Add task cancellation mechanism {pull}16320[#16320] +* Make the Task object available to the action caller {pull}16033[#16033] +* Task Management: Add framework for registering and communicating with tasks {pull}15347[#15347] (issue: {issue}15117[#15117]) + + + +[[enhancement-5.0.0-alpha1]] +[float] +=== Enhancements + +Aggregations:: +* Add tests and documentation for using `time_zone` in date range aggregation {pull}16955[#16955] (issue: {issue}10130[#10130]) +* Refactoring of Aggregations {pull}14136[#14136] + +Allocation:: +* Write shard state metadata as soon as shard is created / initializing {pull}16625[#16625] (issue: {issue}14739[#14739]) +* Reuse existing allocation id for primary shard allocation {pull}16530[#16530] (issue: {issue}14739[#14739]) +* Remove version in ShardRouting (now obsolete) {pull}16243[#16243] (issue: {issue}14739[#14739]) +* Prefer nodes that previously held primary shard for primary shard allocation {pull}16096[#16096] (issue: {issue}14739[#14739]) +* Extend reroute with an option to force assign stale primary shard copies {pull}15708[#15708] (issue: {issue}14739[#14739]) +* Allocate primary shards based on allocation IDs {pull}15281[#15281] (issue: {issue}14739[#14739]) +* Persist currently started allocation IDs to index metadata {pull}14964[#14964] (issue: {issue}14739[#14739]) +* Use ObjectParser to parse AllocationID {pull}14962[#14962] (issue: {issue}14831[#14831]) +* Persist allocation ID with shard state metadata on nodes {pull}14831[#14831] (issue: {issue}14739[#14739]) + +CAT API:: +* Expose http address in cat/nodes {pull}16770[#16770] +* [cat/recovery] Make recovery time a TimeValue() {pull}16743[#16743] (issue: {issue}9209[#9209]) +* :CAT API: remove space at the end of a line {pull}15250[#15250] (issue: {issue}9464[#9464]) + +CRUD:: +* CRUD: Allow to get and set ttl as a time value/string {pull}15047[#15047] + +Cache:: +* Enable the indices request cache by default {pull}17162[#17162] (issues: {issue}16870[#16870], {issue}17134[#17134]) + +Cluster:: +* Resolve index names to Index instances early {pull}17048[#17048] +* Remove DiscoveryNode#shouldConnectTo method {pull}16898[#16898] (issue: {issue}16815[#16815]) +* Fail demoted primary shards and retry request {pull}16415[#16415] (issue: 
{issue}14252[#14252]) +* Illegal shard failure requests {pull}16275[#16275] +* Shard failure requests for non-existent shards {pull}16089[#16089] (issue: {issue}14252[#14252]) +* Add handling of channel failures when starting a shard {pull}16041[#16041] (issue: {issue}15895[#15895]) +* Wait for new master when failing shard {pull}15748[#15748] (issue: {issue}14252[#14252]) +* Master should wait on cluster state publication when failing a shard {pull}15468[#15468] (issue: {issue}14252[#14252]) +* Split cluster state update tasks into roles {pull}14899[#14899] (issue: {issue}13627[#13627]) +* Add timeout mechanism for sending shard failures {pull}14707[#14707] (issue: {issue}14252[#14252]) +* Add listener mechanism for failures to send shard failed {pull}14295[#14295] (issue: {issue}14252[#14252]) + +Core:: +* Use index UUID to lookup indices on IndicesService {pull}17001[#17001] +* Add -XX+AlwaysPreTouch JVM flag {pull}16937[#16937] +* Use and test relative time in TransportBulkAction {pull}16916[#16916] +* Bump Elasticsearch version to 5.0.0-alpha1-SNAPSHOT {pull}16862[#16862] +* Assert that we can write in all data-path on startup {pull}16745[#16745] +* Add G1GC check on startup {pull}16737[#16737] (issue: {issue}10740[#10740]) +* Shards with heavy indexing should get more of the indexing buffer {pull}14121[#14121] +* Remove and ban ImmutableMap {pull}13939[#13939] (issue: {issue}13224[#13224]) +* Finish banning ImmutableSet {pull}13820[#13820] (issue: {issue}13224[#13224]) +* Removes and bans ImmutableSet {pull}13754[#13754] (issue: {issue}13224[#13224]) +* Remove and ban ImmutableMap#entrySet {pull}13724[#13724] +* Forbid ForwardingSet {pull}13720[#13720] (issue: {issue}13224[#13224]) + +Discovery:: +* Add a dedicated queue for incoming ClusterStates {pull}13303[#13303] (issue: {issue}13062[#13062]) + +Engine:: +* Remove writeLockTimeout from InternalEngine {pull}16930[#16930] +* Don't guard IndexShard#refresh calls by a check to isRefreshNeeded {pull}16118[#16118] +* Never call a listener under lock in InternalEngine {pull}15786[#15786] +* Use System.nanoTime() to initialize Engine.lastWriteNanos {pull}14321[#14321] +* Flush big merges automatically if shard is inactive {pull}14275[#14275] +* Remove Engine.Create {pull}13955[#13955] +* Remove the disabled autogenerated id optimization from InternalEngine {pull}13857[#13857] + +Exceptions:: +* Fix typos in exception/assert/log messages in core module. {pull}16649[#16649] +* Add field names to several mapping errors {pull}16508[#16508] (issue: {issue}16378[#16378]) +* Add serialization support for more important IOExceptions {pull}15766[#15766] +* Adds exception objects to log messages. 
{pull}14827[#14827] (issue: {issue}10021[#10021]) +* Remove reflection hacks from ElasticsearchException {pull}13796[#13796] +* Rename QueryParsingException to a more generic ParsingException {pull}13631[#13631] +* Add *Exception(Throwable cause) constructors/ call where appropriate {pull}13544[#13544] (issue: {issue}10021[#10021]) + +Geo:: +* Fix a potential parsing problem in GeoDistanceSortParser {pull}17111[#17111] +* Geo: Add validation of shapes to ShapeBuilders {pull}15551[#15551] (issue: {issue}14416[#14416]) +* Make remaining ShapeBuilders implement Writeable {pull}15010[#15010] (issue: {issue}14416[#14416]) +* Geo: Remove internal `translated` flag from LineStringBuilder {pull}14969[#14969] +* Make PointBuilder, CircleBuilder & EnvelopeBuilder implement Writable {pull}14933[#14933] (issue: {issue}14416[#14416]) +* Merging BaseLineString and BasePolygonBuilder with subclass {pull}14887[#14887] (issue: {issue}14482[#14482]) +* Moving static factory methods to ShapeBuilders {pull}14529[#14529] +* Remove InternalLineStringBuilder and InternalPolygonBuilder {pull}14482[#14482] (issue: {issue}14416[#14416]) + +Highlighting:: +* Joint parsing of common global Highlighter and subfield parameters {pull}15368[#15368] (issue: {issue}15285[#15285]) +* Add fromXContent method to HighlightBuilder {pull}15157[#15157] + +Ingest:: +* Added ingest statistics to node stats API {pull}16915[#16915] +* Add `ingest_took` to bulk response {pull}16876[#16876] +* Add ingest info to node info API, which contains a list of available processors {pull}16865[#16865] +* Use diffs for ingest metadata in cluster state {pull}16847[#16847] +* hide null-valued metadata fields from WriteableIngestDocument#toXContent {pull}16557[#16557] +* Ingest: use bulk thread pool for bulk request processing (was index before) {pull}16539[#16539] (issue: {issue}16503[#16503]) +* Add foreach processor {pull}16432[#16432] +* revert PipelineFactoryError handling with throwing ElasticsearchParseException in ingest pipeline creation {pull}16355[#16355] +* Add processor tags to on_failure metadata in ingest pipeline {pull}16324[#16324] (issue: {issue}16202[#16202]) +* catch processor/pipeline factory exceptions and return structured error responses {pull}16276[#16276] (issue: {issue}16010[#16010]) +* Ingest: move get/put/delete pipeline methods to ClusterAdminClient {pull}16242[#16242] +* Geoip processor: remove redundant latitude and longitude fields and make location an object with lat and lon subfields {pull}16173[#16173] + +Internal:: +* Support scheduled commands in current context {pull}17077[#17077] +* Thread limits {pull}17003[#17003] +* Remove leniency from segments info integrity checks {pull}16985[#16985] (issue: {issue}16973[#16973]) +* Rename SearchServiceTransportAction to SearchTransportService {pull}16880[#16880] +* Decouple the TransportService and ClusterService {pull}16872[#16872] (issue: {issue}16788[#16788]) +* Refactor bootstrap checks {pull}16844[#16844] (issues: {issue}16733[#16733], {issue}16835[#16835]) +* Add LifecycleRunnable {pull}16752[#16752] +* Hot inlined methods in your area {pull}16725[#16725] +* Move IndicesQueryCache and IndicesRequestCache into IndicesService {pull}16603[#16603] +* Forbid use of java.security.MessageDigest#clone() {pull}16543[#16543] (issue: {issue}16479[#16479]) +* Make IndicesWarmer a private class of IndexService {pull}16470[#16470] +* Simplify IndicesFieldDataCache and detach from guice {pull}16469[#16469] +* Uppercase ells ('L') in long literals {pull}16329[#16329] (issue: 
{issue}16279[#16279]) +* ShardId equality and hash code inconsistency {pull}16319[#16319] (issue: {issue}16217[#16217]) +* Ensure all resources are closed on Node#close() {pull}16316[#16316] (issue: {issue}13685[#13685]) +* Make index uuid available in Index, ShardRouting & ShardId {pull}16217[#16217] +* Move RefreshTask into IndexService and use since task per index {pull}15933[#15933] +* Make IndexingMemoryController private to IndicesService {pull}15877[#15877] +* Cleanup IndexingOperationListeners infrastructure {pull}15875[#15875] +* Remove and forbid use of j.u.c.ThreadLocalRandom {pull}15862[#15862] (issue: {issue}15294[#15294]) +* Fix IntelliJ query builder type inference issues {pull}15429[#15429] +* Remove and forbid use of Collections#shuffle(List) and Random#() {pull}15299[#15299] (issue: {issue}15287[#15287]) +* Remove and forbid use of the type-unsafe empty Collections fields {pull}15187[#15187] +* Move IndicesService.canDeleteShardContent to use IndexSettings {pull}15150[#15150] (issue: {issue}15059[#15059]) +* Simplify MonitorService construction and detach from guice {pull}15035[#15035] +* Use Supplier for StreamInput#readOptionalStreamable {pull}14806[#14806] +* Add variable-length long encoding {pull}14780[#14780] +* Extend usage of IndexSetting class {pull}14731[#14731] (issue: {issue}14251[#14251]) +* Fold SimilarityModule into IndexModule {pull}14284[#14284] +* Move to lucene BoostQuery {pull}14264[#14264] +* Use built-in method for computing hash code of longs {pull}14213[#14213] +* Refactor ShardFailure listener infrastructure {pull}14206[#14206] +* Add methods for variable-length encoding integral arrays {pull}14087[#14087] +* Fold IndexAliasesService into IndexService {pull}14044[#14044] +* Remove unneeded Module abstractions {pull}13944[#13944] +* Query refactoring: simplify IndexQueryParserService parse methods {pull}13938[#13938] (issue: {issue}13859[#13859]) +* Remove and forbid use of com.google.common.collect.Iterators {pull}13916[#13916] (issue: {issue}13224[#13224]) +* Remove and forbid use of com.google.common.collect.ImmutableCollection {pull}13909[#13909] (issue: {issue}13224[#13224]) +* Remove and forbid use of com.google.common.io.Resources {pull}13908[#13908] (issue: {issue}13224[#13224]) +* Remove and forbid use of com.google.common.hash.* {pull}13907[#13907] (issue: {issue}13224[#13224]) +* Remove and forbid use of com.google.common.net.InetAddresses {pull}13905[#13905] (issue: {issue}13224[#13224]) +* Remove and forbid use of com.google.common.collect.EvictingQueue {pull}13903[#13903] (issue: {issue}13224[#13224]) +* Replace Guava cache with simple concurrent LRU cache {pull}13879[#13879] +* Remove ClusterSerivce and IndexSettingsService dependency from IndexShard {pull}13853[#13853] +* Start making RecoverySourceHandler unittestable {pull}13840[#13840] +* Remove IndexService dep. 
from IndexShard {pull}13797[#13797] +* Remove ES internal deletion policies in favour of Lucenes implementations {pull}13794[#13794] +* Move ShardTermVectorService to be on indices level as TermVectorService {pull}13786[#13786] +* Move ShardPercolateService creation into IndexShard {pull}13777[#13777] +* Remove `ExpressionScriptCompilationException` and `ExpressionScriptExecutionException` {pull}13742[#13742] +* Reduced the number of ClusterStateUpdateTask variants {pull}13735[#13735] +* Add a BaseParser helper for stream parsing {pull}13615[#13615] +* Remove and forbid use of com.google.common.primitives.Ints {pull}13596[#13596] (issue: {issue}13224[#13224]) +* Remove and forbid use of com.google.common.math.LongMath {pull}13575[#13575] (issue: {issue}13224[#13224]) +* Remove and forbid use of com.google.common.base.Joiner {pull}13572[#13572] (issue: {issue}13224[#13224]) +* Replace and ban next batch of Guava classes {pull}13562[#13562] (issue: {issue}13224[#13224]) +* Remove and forbid use of com.google.common.collect.Iterables {pull}13559[#13559] (issue: {issue}13224[#13224]) +* Replace LoadingCache usage with a simple ConcurrentHashMap {pull}13552[#13552] (issue: {issue}13224[#13224]) +* Use Supplier instead of Reflection {pull}13545[#13545] +* Remove and forbid use of com.google.common.base.Preconditions {pull}13540[#13540] (issue: {issue}13224[#13224]) +* Remove and forbid use of guava Function, Charsets, Collections2 {pull}13533[#13533] (issue: {issue}13224[#13224]) +* Remove and forbid use of com.google.common.collect.ImmutableSortedMap {pull}13525[#13525] (issue: {issue}13224[#13224]) +* Remove and forbid use of several com.google.common.util. classes {pull}13524[#13524] (issue: {issue}13224[#13224]) +* Cleanup SearchRequest & SearchRequestBuilder {pull}13518[#13518] +* Remove and forbid use of com.google.common.collect.Queues {pull}13498[#13498] (issue: {issue}13224[#13224]) +* Remove and forbid use of com.google.common.base.Preconditions#checkNotNull {pull}13493[#13493] (issue: {issue}13224[#13224]) +* Remove and forbid use of com.google.common.collect.Sets {pull}13463[#13463] (issue: {issue}13224[#13224]) +* Remove and forbid use of com.google.common.collect.Maps {pull}13438[#13438] (issue: {issue}13224[#13224]) +* Remove use of underscore as an identifier {pull}13353[#13353] +* Remove and forbid the use of com.google.common.base.Predicate(s)? {pull}13349[#13349] (issues: {issue}13224[#13224], {issue}13314[#13314]) +* This commit removes com.google.common.io {pull}13302[#13302] (issue: {issue}13224[#13224]) + +Java API:: +* Remove copy constructors from request classes and TransportMessage type {pull}16640[#16640] (issue: {issue}15776[#15776]) + +Mapping:: +* Remove friction from the mapping changes in 5.0. {pull}16991[#16991] +* Rework norms parameters for 5.0. {pull}16987[#16987] +* Moved dynamic field handling in doc parsing to end of parsing {pull}16798[#16798] +* Remove the MapperBuilders utility class. {pull}16609[#16609] +* Make the `index` property a boolean. {pull}16161[#16161] +* Remove the ability to enable doc values with the `fielddata.format` setting. {pull}16147[#16147] +* Be stricter about parsing boolean values in mappings. {pull}16146[#16146] +* Fix default doc values to be enabled when a field is not indexed. {pull}16141[#16141] +* Dynamically map floating-point numbers as floats instead of doubles. {pull}15319[#15319] (issue: {issue}13851[#13851]) +* Simplify MetaDataMappingService. {pull}15217[#15217] +* Remove MergeMappingException. 
{pull}15177[#15177] + +NOT CLASSIFIED:: +* Use a seed node to form multi-node cluster in integ tests {pull}17078[#17078] + +Packaging:: +* Fail early on JDK with compiler bug {pull}16418[#16418] (issues: {issue}16097[#16097], {issue}16362[#16362]) +* Make security non-optional {pull}16176[#16176] +* Remove RuntimePermission("accessDeclaredMembers") {pull}15378[#15378] +* Remove Guava as a dependency {pull}14055[#14055] (issue: {issue}13224[#13224]) +* Remove Guava as a dependency {pull}14054[#14054] (issue: {issue}13224[#13224]) + +Plugin Ingest Attachment:: +* Minor attachment processor improvements {pull}16574[#16574] + +Plugin Lang Painless:: +* Make Painless a Module {pull}16755[#16755] +* Minor Clean up {pull}16457[#16457] + +Plugin Mapper Attachment:: +* minor attachments cleanups: IDE test support and EPUB format {pull}14626[#14626] + +Plugin Repository Azure:: +* Support global `repositories.azure.` settings {pull}15141[#15141] (issue: {issue}13776[#13776]) +* Add timeout settings (default to 5 minutes) {pull}15080[#15080] (issue: {issue}14277[#14277]) + +Plugin Repository HDFS:: +* merge current hdfs improvements to master {pull}15588[#15588] + +Plugin Repository S3:: +* Add support for proxy authentication for s3 and ec2 {pull}15293[#15293] (issue: {issue}15268[#15268]) + +Plugins:: +* CliTool: Cleanup and document Terminal {pull}16443[#16443] +* Plugin cli: Improve maven coordinates detection {pull}16384[#16384] (issue: {issue}16376[#16376]) +* Enforce plugin zip does not contain zip entries outside of the plugin dir {pull}16361[#16361] +* CliTool: Allow unexpected exceptions to propagate {pull}16359[#16359] +* Reduce complexity of plugin cli {pull}16336[#16336] +* Remove Plugin.onIndexService. {pull}15029[#15029] (issue: {issue}14896[#14896]) +* Open up QueryCache and SearcherWrapper extension points {pull}14303[#14303] + +Query DSL:: +* Function Score Query: make parsing stricter {pull}16617[#16617] (issue: {issue}16583[#16583]) +* Parsers should throw exception on unknown objects {pull}14255[#14255] (issue: {issue}10974[#10974]) +* UNICODE_CHARACTER_CLASS fix {pull}11598[#11598] (issue: {issue}10146[#10146]) + +Query Refactoring:: +* Add infrastructure to rewrite query builders {pull}16599[#16599] +* Switch geo validation to enum {pull}13672[#13672] (issue: {issue}13608[#13608]) + +REST:: +* More robust handling of CORS HTTP Access Control {pull}16092[#16092] +* Add option to exclude based on paths in XContent {pull}16017[#16017] + +Recovery:: +* Relocation source should be marked as relocating before starting recovery to primary relocation target {pull}16500[#16500] +* Operation counter for IndexShard {pull}15956[#15956] (issue: {issue}15900[#15900]) +* Primary relocation handoff {pull}15900[#15900] (issue: {issue}15532[#15532]) +* Remove recovery threadpools and throttle outgoing recoveries on the master {pull}15372[#15372] +* Refactor StoreRecoveryService to be a simple package private util class {pull}13766[#13766] + +Reindex API:: +* Add ingest pipeline support to reindex {pull}16932[#16932] + +Scripting:: +* Remove Extra String Concat Token {pull}16382[#16382] +* Skipping hidden files compilation for script service {pull}16286[#16286] (issue: {issue}15269[#15269]) +* Rename Plan A to Painless {pull}16245[#16245] +* Add plumbing for script compile-time parameters {pull}15464[#15464] +* Factor mustache -> modules/lang-mustache {pull}15328[#15328] + +Search:: +* Store _all payloads on 1 byte instead of 4. 
{pull}16899[#16899] +* Refuse to load fields from _source when using the `fields` option and support wildcards. {pull}15017[#15017] (issues: {issue}10783[#10783], {issue}14489[#14489]) +* Add response into ClearScrollResponse {pull}13835[#13835] (issue: {issue}13817[#13817]) +* Shuffle shards for _only_nodes + support multiple specifications like cluster API {pull}12575[#12575] (issues: {issue}12546[#12546], {issue}12700[#12700]) + +Search Refactoring:: +* Move sort `order` field up into SortBuilder {pull}17035[#17035] +* Moves SortParser:parse(...) to only require QueryShardContext {pull}16999[#16999] (issue: {issue}15178[#15178]) +* Change internal representation of suggesters {pull}16873[#16873] +* Fixes serialisation of Ranges {pull}16674[#16674] +* Make GeoDistanceSortBuilder serializable, 2nd try {pull}16572[#16572] (issues: {issue}15178[#15178], {issue}16151[#16151]) +* Move missing() from SortBuilder interface to class {pull}16225[#16225] (issues: {issue}15178[#15178], {issue}16151[#16151]) +* Remove deprecated parameters from ScriptSortBuilder {pull}16153[#16153] (issue: {issue}15178[#15178]) +* Refactor GeoSortBuilder {pull}16151[#16151] (issue: {issue}15178[#15178]) +* Refactor FieldSortBuilder {pull}16127[#16127] (issue: {issue}15178[#15178]) +* Make sort order enum writable. {pull}16124[#16124] (issue: {issue}15178[#15178]) +* Make DistanceUnit writable. {pull}16122[#16122] (issue: {issue}15178[#15178]) +* RescoreBuilder: Add parsing and creating of RescoreSearchContext {pull}16014[#16014] (issue: {issue}15559[#15559]) +* Make RescoreBuilder and nested QueryRescorer Writable {pull}15953[#15953] (issue: {issue}15559[#15559]) +* Use HighlightBuilder in SearchSourceBuilder {pull}15376[#15376] (issue: {issue}15044[#15044]) +* Enable HighlightBuilder to create SearchContextHighlight {pull}15324[#15324] +* Explain api: move query parsing to the coordinating node {pull}14270[#14270] +* Switch query parsers to use ParseField {pull}14249[#14249] (issue: {issue}8964[#8964]) + +Settings:: +* Settings with complex matchers should not overlap {pull}16754[#16754] +* Moves GCE settings to the new infra {pull}16722[#16722] (issue: {issue}16720[#16720]) +* Add filtering support within Setting class {pull}16629[#16629] (issue: {issue}16598[#16598]) +* Migrate AWS settings to new settings infrastructure {pull}16602[#16602] (issue: {issue}16293[#16293]) +* Remove `gateway.initial_meta` and always rely on min master nodes {pull}16446[#16446] +* Rewrite SettingsFilter to be immutable {pull}16425[#16425] +* Simplify azure settings {pull}16363[#16363] +* Convert PageCacheRecycler settings {pull}16341[#16341] +* Monitor settings {pull}16313[#16313] +* Cut over tribe node settings to new settings infra {pull}16311[#16311] +* Convert multicast plugin settings to the new infra {pull}16295[#16295] +* Convert `request.headers.*` to the new settings infra {pull}16292[#16292] +* Migrate Azure settings to new settings infrastructure {pull}16291[#16291] +* Validate logger settings and allow them to be reset via API {pull}16289[#16289] +* Switch NodeEnvironment's settings to new settings {pull}16273[#16273] +* Simplify AutoCreateIndex and add more tests {pull}16270[#16270] +* Convert several pending settings {pull}16269[#16269] +* Migrate query caching settings to the new settings infra. {pull}16267[#16267] +* Convert `action.auto_create_index` and `action.master.force_local` to the new settings infra {pull}16263[#16263] +* Convert `cluster.routing.allocation.type` and `processors` to the new settings infra. 
{pull}16238[#16238] +* Validate tribe node settings on startup {pull}16237[#16237] +* Move node.client, node.data, node.master, node.local and node.mode to new settings infra {pull}16230[#16230] +* Moved http settings to the new settings infrastructure {pull}16188[#16188] +* Migrate network service to the new infra {pull}16187[#16187] +* Convert client.transport settings to new infra {pull}16183[#16183] +* Move discovery.* settings to new Setting infrastructure {pull}16182[#16182] +* Change over to o.e.common.settings.Setting for http settings {pull}16181[#16181] +* Convert "path.*" and "pidfile" to new settings infra {pull}16180[#16180] +* Migrate repository settings to the new settings API {pull}16178[#16178] +* Convert "indices.*" settings to new infra. {pull}16177[#16177] +* Migrate gateway settings to the new settings API. {pull}16175[#16175] +* Convert several node and test level settings {pull}16172[#16172] +* Run Metadata upgrade tool on every version {pull}16168[#16168] +* Check for invalid index settings on metadata upgrade {pull}16156[#16156] +* Validate the settings key if it's simple chars separated by `.` {pull}16120[#16120] +* Validate known global settings on startup {pull}16091[#16091] +* Cut over all index scope settings to the new setting infrastructure {pull}16054[#16054] (issues: {issue}12790[#12790], {issue}12854[#12854], {issue}16032[#16032], {issue}6732[#6732]) +* Remove updatability of `index.flush_on_close` {pull}15964[#15964] (issue: {issue}15955[#15955]) +* Move all dynamic settings and their config classes to the index level {pull}15955[#15955] (issue: {issue}6732[#6732]) +* Always require units for bytes and time settings {pull}15948[#15948] (issue: {issue}11437[#11437]) +* Make MetaData parsing less lenient. {pull}15828[#15828] +* Move async translog sync logic into IndexService {pull}15584[#15584] +* Remove `index.merge.scheduler.notify_on_failure` and default to `true` {pull}15572[#15572] (issue: {issue}15570[#15570]) +* Remove cache concurrency level settings that no longer apply {pull}14210[#14210] (issues: {issue}13224[#13224], {issue}13717[#13717], {issue}7836[#7836]) + +Similarities:: +* Defining a global default similarity {pull}16682[#16682] (issue: {issue}16594[#16594]) + +Snapshot/Restore:: +* Remove AbstractLegacyBlobContainer {pull}14650[#14650] (issue: {issue}13434[#13434]) + +Stats:: +* Normalize unavailable load average {pull}16061[#16061] (issues: {issue}12049[#12049], {issue}14741[#14741], {issue}15907[#15907], {issue}15932[#15932], {issue}15934[#15934]) +* Add load averages to OS stats on FreeBSD {pull}15934[#15934] (issue: {issue}15917[#15917]) +* Expose pending cluster state queue size in node stats {pull}14040[#14040] (issue: {issue}13610[#13610]) + +Store:: +* Remove support for legacy checksums {pull}16931[#16931] +* Rename index folder to index_uuid {pull}16442[#16442] (issues: {issue}13264[#13264], {issue}13265[#13265], {issue}14512[#14512], {issue}14932[#14932], {issue}15853[#15853]) + +Suggesters:: +* Refactoring of Suggestions {pull}17096[#17096] (issue: {issue}10217[#10217]) + +Task Manager:: +* Add start time and duration to tasks {pull}16829[#16829] +* Combine node name and task id into single string task id {pull}16744[#16744] +* Add task status {pull}16356[#16356] (issue: {issue}16344[#16344]) +* Extend tracking of parent tasks to master node, replication and broadcast actions {pull}15931[#15931] + +Translog:: +* Remove ChannelReference and simplify Views {pull}15898[#15898] +* Simplify TranslogWriter to always write to a 
stream {pull}15771[#15771] +* Remove TranslogService and fold it into synchronous IndexShard API {pull}13707[#13707] + + + +[[bug-5.0.0-alpha1]] +[float] +=== Bug fixes + +Aggregations:: +* Correct typo in class name of StatsAggregator {pull}15264[#15264] (issue: {issue}14730[#14730]) + +Allocation:: +* Replica shards must be failed before primary shards {pull}15686[#15686] + +CRUD:: +* Prevent TransportReplicationAction to route request based on stale local routing table {pull}16274[#16274] (issues: {issue}12573[#12573], {issue}12574[#12574]) +* Resolves the conflict between alias routing and parent routing by applying the alias routing and ignoring the parent routing. {pull}15371[#15371] (issue: {issue}3068[#3068]) + +Cluster:: +* Shard state action channel exceptions {pull}16057[#16057] (issue: {issue}15748[#15748]) + +Geo:: +* Fix multi-field support for GeoPoint types {pull}15702[#15702] (issue: {issue}15701[#15701]) +* Enforce distance in distance query is > 0 {pull}15135[#15135] + +Ingest:: +* The IngestDocument copy constructor should make a deep copy {pull}16248[#16248] (issue: {issue}16246[#16246]) + +Internal:: +* Enable unmap hack for java 9 {pull}16986[#16986] (issue: {issue}1[#1]) +* Fix issues with failed cache loads {pull}14315[#14315] +* Allow parser to move on the START_OBJECT token when parsing search source {pull}14145[#14145] +* Ensure searcher is release if wrapping fails {pull}14107[#14107] +* Avoid deadlocks in Cache#computeIfAbsent {pull}14091[#14091] (issue: {issue}14090[#14090]) + +Java API:: +* Fix potential NPE in SearchSourceBuilder {pull}16905[#16905] (issue: {issue}16902[#16902]) + +Mapping:: +* Fix dynamic mapper when its parent already has an update {pull}17065[#17065] +* Fix copy_to when the target is a dynamic object field. 
{pull}15216[#15216] (issues: {issue}111237[#111237], {issue}11237[#11237]) +* Preserve existing mappings on batch mapping updates {pull}15130[#15130] (issues: {issue}14899[#14899], {issue}15129[#15129]) + +Packaging:: +* Do not pass double-dash arguments on startup {pull}17087[#17087] (issue: {issue}17084[#17084]) + +Plugin Store SMB:: +* Fix calling ensureOpen() on the wrong directory (master forwardport) {pull}16395[#16395] (issue: {issue}16383[#16383]) + +Plugins:: +* CliTool: Messages printed in Terminal should have percent char escaped {pull}16367[#16367] + +Query DSL:: +* `constant_score` query should throw error on more than one filter {pull}17135[#17135] (issue: {issue}17126[#17126]) +* Single IPv4 addresses in IP field term queries {pull}16068[#16068] (issue: {issue}16058[#16058]) +* Make strategy optional in GeoShapeQueryBuilder readFrom and writeTo {pull}13963[#13963] + +Query Refactoring:: +* Query refactoring: set has_parent & has_child types context properly {pull}13863[#13863] +* Make sure equivalent geohashCellQueries are equal after toQuery called {pull}13792[#13792] + +Recovery:: +* Prevent interruption while store checks lucene files for consistency {pull}16308[#16308] +* Mark shard as recovering on the cluster state thread {pull}14276[#14276] (issues: {issue}13766[#13766], {issue}14115[#14115]) + +Search:: +* Fix for search after {pull}16271[#16271] +* Do not be lenient when parsing CIDRs {pull}14874[#14874] (issue: {issue}14862[#14862]) + +Settings:: +* Register bootstrap settings {pull}16513[#16513] +* Add settings filtering to node info requests {pull}16445[#16445] +* Ban write access to system properties {pull}14914[#14914] + +Translog:: +* Mark shard active during recovery; push settings after engine finally inits {pull}16250[#16250] (issues: {issue}14121[#14121], {issue}16209[#16209]) + + + +[[upgrade-5.0.0-alpha1]] +[float] +=== Upgrades + +Core:: +* Upgrade to lucene-6.0.0-f0aa4fc. {pull}17075[#17075] +* upgrade to lucene 6.0.0-snapshot-bea235f {pull}16964[#16964] +* Upgrade to Jackson 2.7.1 {pull}16801[#16801] (issue: {issue}16294[#16294]) + +Ingest:: +* Update MaxMind geoip2 version to 2.6 {pull}16837[#16837] (issue: {issue}16801[#16801]) + +Internal:: +* Bump master (3.0-snapshot) to java 8 {pull}13314[#13314] + +Search Templates:: +* Update mustache.java to version 0.9.1 {pull}14053[#14053] (issue: {issue}13224[#13224]) + + diff --git a/docs/reference/search/percolate.asciidoc b/docs/reference/search/percolate.asciidoc index bb4040770eb..44400f5b816 100644 --- a/docs/reference/search/percolate.asciidoc +++ b/docs/reference/search/percolate.asciidoc @@ -3,511 +3,6 @@ added[5.0.0,Percolator query modifications aren't visible immediately and a refresh is required] -added[5.0.0,Percolate api by defaults limits the number of matches to `10` whereas before this wasn't set] +added[5.0.0,Percolate and multi percolate APIs have been deprecated and have been replaced by <>] added[5.0.0,For indices created on or after version 5.0.0 the percolator automatically indexes the query terms with the percolator queries; this allows the percolator to percolate documents more quickly. It is advisable to reindex any pre 5.0.0 indices to take advantage of this new optimization] - -Traditionally you design documents based on your data, store them into an index, and then define queries via the search API -in order to retrieve these documents. The percolator works in the opposite direction. 
First you store queries into an -index and then, via the percolate API, you define documents in order to retrieve these queries. - -The reason that queries can be stored comes from the fact that in Elasticsearch both documents and queries are defined in -JSON. This allows you to embed queries into documents via the index API. Elasticsearch can extract the query from a -document and make it available to the percolate API. Since documents are also defined as JSON, you can define a document -in a request to the percolate API. - -[IMPORTANT] -===================================== - -Fields referred to in a percolator query must *already* exist in the mapping -associated with the index used for percolation. In order to make sure these fields exist, -add or update a mapping via the <> or <> APIs. - -===================================== - -[float] -=== Sample Usage - -Create an index with a mapping for the field `message`: - -[source,js] --------------------------------------------------- -curl -XPUT 'localhost:9200/my-index' -d '{ - "mappings": { - "my-type": { - "properties": { - "message": { - "type": "string" - } - } - } - } -}' --------------------------------------------------- - -Register a query in the percolator: - -[source,js] --------------------------------------------------- -curl -XPUT 'localhost:9200/my-index/.percolator/1' -d '{ - "query" : { - "match" : { - "message" : "bonsai tree" - } - } -}' --------------------------------------------------- - -Match a document to the registered percolator queries: - -[source,js] --------------------------------------------------- -curl -XGET 'localhost:9200/my-index/my-type/_percolate' -d '{ - "doc" : { - "message" : "A new bonsai tree in the office" - } -}' --------------------------------------------------- - -The above request will yield the following response: - -[source,js] --------------------------------------------------- -{ - "took" : 19, - "_shards" : { - "total" : 5, - "successful" : 5, - "failed" : 0 - }, - "total" : 1, - "matches" : [ <1> - { - "_index" : "my-index", - "_id" : "1" - } - ] -} --------------------------------------------------- - -<1> The percolate query with id `1` matches our document. - -[float] -=== Indexing Percolator Queries - -Percolate queries are stored as documents in a specific format and in an arbitrary index under a reserved type with the -name `.percolator`. The query itself is placed as is in a JSON object under the top level field `query`. - -[source,js] --------------------------------------------------- -{ - "query" : { - "match" : { - "field" : "value" - } - } -} --------------------------------------------------- - -Since this is just an ordinary document, any field can be added to this document. This can be useful later on to only -percolate documents by specific queries. - -[source,js] --------------------------------------------------- -{ - "query" : { - "match" : { - "field" : "value" - } - }, - "priority" : "high" -} --------------------------------------------------- - -On top of this, a mapping type can also be associated with this query. This allows you to control how certain queries -like range queries, shape filters, and other queries & filters that rely on mapping settings get constructed. This is -important since the percolate queries are indexed into the `.percolator` type, and the queries / filters that rely on -mapping settings would yield unexpected behaviour.
Note: By default, field names are resolved in a smart manner, -but in certain cases with multiple types this can lead to unexpected behavior, so being explicit about it will help. - -[source,js] --------------------------------------------------- -{ - "query" : { - "range" : { - "created_at" : { - "gte" : "2010-01-01T00:00:00", - "lte" : "2011-01-01T00:00:00" - } - } - }, - "type" : "tweet", - "priority" : "high" -} --------------------------------------------------- - -In the above example the range query really gets parsed into a Lucene numeric range query, based on the settings for -the field `created_at` in the type `tweet`. - -Just as with any other type, the `.percolator` type has a mapping, which you can configure via the mappings APIs. -The default percolate mapping doesn't index the query field, only stores it. - -Because `.percolator` is a type it also has a mapping. By default the following mapping is active: - -[source,js] --------------------------------------------------- -{ - ".percolator" : { - "properties" : { - "query" : { - "type" : "object", - "enabled" : false - } - } - } -} --------------------------------------------------- - -If needed, this mapping can be modified with the update mapping API. - -In order to un-register a percolate query the delete API can be used. So if the previously added query needs to be deleted, -the following delete request needs to be executed: - -[source,js] --------------------------------------------------- -curl -XDELETE localhost:9200/my-index/.percolator/1 --------------------------------------------------- - -[float] -=== Percolate API - -The percolate API executes in a distributed manner, meaning it executes on all shards an index points to. - -.Required options -* `index` - The index that contains the `.percolator` type. This can also be an alias. -* `type` - The type of the document to be percolated. The mapping of that type is used to parse the document. -* `doc` - The actual document to percolate. Unlike the other two options this needs to be specified in the request body. Note: This isn't required when percolating an existing document. - -[source,js] --------------------------------------------------- -curl -XGET 'localhost:9200/twitter/tweet/_percolate' -d '{ - "doc" : { - "created_at" : "2010-10-10T00:00:00", - "message" : "some text" - } -}' --------------------------------------------------- - -.Additional supported query string options -* `routing` - In case the percolate queries are partitioned by a custom routing value, that routing option makes sure -that the percolate request only gets executed on the shard where the routing value is partitioned to. This means that -the percolate request only gets executed on one shard instead of all shards. Multiple values can be specified as a -comma separated string, in that case the request can be executed on more than one shard. -* `preference` - Controls which shard replicas are preferred to execute the request on. Works the same as in the search API. -* `ignore_unavailable` - Controls if missing concrete indices should silently be ignored. Same as in the search API. -* `percolate_format` - If `ids` is specified then the matches array in the percolate response will contain a string -array of the matching ids instead of an array of objects. This can be useful to reduce the amount of data being sent -back to the client. Obviously if there are two percolator queries with the same id from different indices there is no way -to find out which percolator query belongs to what index.
Any other value to `percolate_format` will be ignored. - -.Additional request body options -* `filter` - Reduces the number of queries to execute during percolation. Only the percolator queries that match the -filter will be included in the percolate execution. The filter option works in near realtime, so a refresh needs to have -occurred for the filter to include the latest percolate queries. -* `query` - Same as the `filter` option, but the score is also computed. The computed scores can then be used by the -`track_scores` and `sort` options. -* `size` - Defines the maximum number of matches (percolate queries) to be returned. Defaults to 10. -* `track_scores` - Whether the `_score` is included for each match. The `_score` is based on the query and represents -how the query matched the *percolate query's metadata*, *not* how the document (that is being percolated) matched -the query. The `query` option is required for this option. Defaults to `false`. -* `sort` - Defines a sort specification like in the search API. Currently only sorting on `_score` in reverse (default relevancy) -is supported. Other sort fields will throw an exception. The `size` and `query` options are required for this setting. Like -`track_scores`, the score is based on the query and represents how the query matched the percolate query's metadata -and *not* how the document being percolated matched the query. -* `aggs` - Allows aggregation definitions to be included. The aggregations are based on the matching percolator queries; -see the aggregation documentation on how to define aggregations. -* `highlight` - Allows highlight definitions to be included. The document being percolated is highlighted for each -matching query. This allows you to see how each match is highlighting the document being percolated. See the highlight -documentation on how to define highlights. The `size` option is required for highlighting; the performance of highlighting - in the percolate API depends on how many matches are being highlighted. - -[float] -=== Dedicated Percolator Index - -Percolate queries can be added to any index. Instead of adding percolate queries to the index the data resides in, -these queries can also be added to a dedicated index. The advantage of this is that this dedicated percolator index -can have its own index settings (for example, the number of primary and replica shards). If you choose to have a dedicated -percolate index, you need to make sure that the mappings from the normal index are also available on the percolate index. -Otherwise percolate queries can be parsed incorrectly. - -[float] -=== Filtering Executed Queries - -Filtering allows you to reduce the number of queries to execute; any filter that the search API supports (except the ones mentioned in the important notes) -can also be used in the percolate API. The filter only works on the metadata fields. The `query` field isn't indexed by -default.
Based on the query we indexed before, the following filter can be defined: - -[source,js] --------------------------------------------------- -curl -XGET localhost:9200/test/type1/_percolate -d '{ - "doc" : { - "field" : "value" - }, - "filter" : { - "term" : { - "priority" : "high" - } - } -}' --------------------------------------------------- - -[float] -=== Percolator Count API - -The count percolate API only keeps track of the number of matches; it doesn't keep track of the actual matches. -Example: - -[source,js] --------------------------------------------------- -curl -XGET 'localhost:9200/my-index/my-type/_percolate/count' -d '{ - "doc" : { - "message" : "some message" - } -}' --------------------------------------------------- - -Response: - -[source,js] --------------------------------------------------- -{ - ... // header - "total" : 3 -} --------------------------------------------------- - - -[float] -=== Percolating an Existing Document - -In order to percolate a newly indexed document, the percolate existing document API can be used. Based on the response -from an index request, the `_id` and other meta information can be used to immediately percolate the newly added -document. - -.Supported options for percolating an existing document on top of existing percolator options: -* `id` - The id of the document to retrieve the source for. -* `percolate_index` - The index containing the percolate queries. Defaults to the `index` defined in the url. -* `percolate_type` - The percolate type (used for parsing the document). Defaults to the `type` defined in the url. -* `routing` - The routing value to use when retrieving the document to percolate. -* `preference` - Which shard to prefer when retrieving the existing document. -* `percolate_routing` - The routing value to use when percolating the existing document. -* `percolate_preference` - Which shard to prefer when executing the percolate request. -* `version` - Enables a version check. If the fetched document's version isn't equal to the specified version then the request fails with a version conflict and the percolation request is aborted. - -Internally the percolate API will issue a GET request for fetching the `_source` of the document to percolate. -For this feature to work, the `_source` for documents to be percolated needs to be stored. - -If percolating an existing document and a document is also specified in the source of the percolate request then -an error is thrown. Either the document to percolate should be specified in the source or be defined by specifying the -index, type and id. - -[float] -==== Example - -Index response: - -[source,js] --------------------------------------------------- -{ - "_index" : "my-index", - "_type" : "message", - "_id" : "1", - "_version" : 1, - "created" : true -} --------------------------------------------------- - -Percolating an Existing Document: - -[source,js] --------------------------------------------------- -curl -XGET 'localhost:9200/my-index/message/1/_percolate' --------------------------------------------------- - -The response is the same as with the regular percolate API. - -[float] -=== Multi Percolate API - -The multi percolate API allows you to bundle multiple percolate requests into a single request, similar to what the multi -search API does to search requests. The request body format is line based. Each percolate request item takes two lines; -the first line is the header and the second line is the body.
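To make the line-based format concrete, here is a minimal sketch of a client-side helper that assembles such a body. The `MpercolateBodyBuilder` class is hypothetical and shown purely for illustration; it is not part of any Elasticsearch client API:

[source,java]
--------------------------------------------------
import java.util.ArrayList;
import java.util.List;

// Hypothetical helper for assembling a multi percolate request body.
// Each item contributes a header line followed by a body line, and the
// format is newline delimited, mirroring the multi search API.
public class MpercolateBodyBuilder {

    private final List<String> lines = new ArrayList<>();

    public MpercolateBodyBuilder add(String headerJson, String bodyJson) {
        lines.add(headerJson); // first line: the header
        lines.add(bodyJson);   // second line: the body
        return this;
    }

    public String build() {
        StringBuilder body = new StringBuilder();
        for (String line : lines) {
            body.append(line).append('\n'); // every line ends with a newline
        }
        return body.toString();
    }

    public static void main(String[] args) {
        String body = new MpercolateBodyBuilder()
            .add("{\"percolate\" : {\"index\" : \"twitter\", \"type\" : \"tweet\"}}",
                 "{\"doc\" : {\"message\" : \"some text\"}}")
            .add("{\"count\" : {\"index\" : \"twitter\", \"type\" : \"tweet\"}}",
                 "{\"doc\" : {\"message\" : \"some other text\"}}")
            .build();
        System.out.print(body); // send this as the _mpercolate request body
    }
}
--------------------------------------------------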
- -The header can contain any parameter that normally would be set via the request path or query string parameters. -There are several percolate actions, because there are multiple types of percolate requests. - -.Supported actions: -* `percolate` - Action for defining a regular percolate request. -* `count` - Action for defining a count percolate request. - -Depending on the percolate action, different parameters can be specified. For example the percolate and percolate existing -document actions support different parameters. - -.The following endpoints are supported -* `GET|POST /[index]/[type]/_mpercolate` -* `GET|POST /[index]/_mpercolate` -* `GET|POST /_mpercolate` - -The `index` and `type` defined in the url path are the default index and type. - -[float] -==== Example - -Request: - -[source,js] --------------------------------------------------- -curl -XGET 'localhost:9200/twitter/tweet/_mpercolate' --data-binary "@requests.txt"; echo --------------------------------------------------- - -The index `twitter` is the default index, and the type `tweet` is the default type; these will be used in case a header -doesn't specify an index or type. - -requests.txt: - -[source,js] --------------------------------------------------- -{"percolate" : {"index" : "twitter", "type" : "tweet"}} -{"doc" : {"message" : "some text"}} -{"percolate" : {"index" : "twitter", "type" : "tweet", "id" : "1"}} -{"percolate" : {"index" : "users", "type" : "user", "id" : "3", "percolate_index" : "users_2012" }} -{"size" : 10} -{"count" : {"index" : "twitter", "type" : "tweet"}} -{"doc" : {"message" : "some other text"}} -{"count" : {"index" : "twitter", "type" : "tweet", "id" : "1"}} --------------------------------------------------- - -For a percolate existing document item (headers with the `id` field), the body can be an empty JSON object. -All the required options are set in the header. - -Response: - -[source,js] --------------------------------------------------- -{ - "responses" : [ - { - "took" : 24, - "_shards" : { - "total" : 5, - "successful" : 5, - "failed" : 0 - }, - "total" : 3, - "matches" : [ - { - "_index": "twitter", - "_id": "1" - }, - { - "_index": "twitter", - "_id": "2" - }, - { - "_index": "twitter", - "_id": "3" - } - ] - }, - { - "took" : 12, - "_shards" : { - "total" : 5, - "successful" : 5, - "failed" : 0 - }, - "total" : 3, - "matches" : [ - { - "_index": "twitter", - "_id": "4" - }, - { - "_index": "twitter", - "_id": "5" - }, - { - "_index": "twitter", - "_id": "6" - } - ] - }, - { - "error" : "DocumentMissingException[[_na][_na] [user][3]: document missing]" - }, - { - "took" : 12, - "_shards" : { - "total" : 5, - "successful" : 5, - "failed" : 0 - }, - "total" : 3 - }, - { - "took" : 14, - "_shards" : { - "total" : 5, - "successful" : 5, - "failed" : 0 - }, - "total" : 3 - } - ] -} - --------------------------------------------------- - -Each item represents a percolate response; the order of the items maps to the order in which the percolate requests -were specified. In case a percolate request failed, the item response is substituted with an error message. - -[float] -=== How it Works Under the Hood - -When a document containing a query is indexed into the `.percolator` type of an index, the query part of the document gets -parsed into a Lucene query and is kept in memory until that percolator document is removed or the index containing the -`.percolator` type gets removed. So, all the active percolator queries are kept in memory.
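The matching step described in the next paragraph can be pictured with Lucene's `MemoryIndex`. The sketch below is a simplified stand-in for the internals under that assumption, not the actual Elasticsearch code path: the document to percolate is loaded into a one-document in-memory index, and each registered query is executed against it.

[source,java]
--------------------------------------------------
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

// Illustrative sketch only: a single document is matched against one
// registered query using Lucene's MemoryIndex.
public class SingleDocumentMatching {
    public static void main(String[] args) {
        // The document being percolated becomes the only document in an
        // in-memory index.
        MemoryIndex memoryIndex = new MemoryIndex();
        memoryIndex.addField("message", "A new bonsai tree in the office", new StandardAnalyzer());

        // Every registered percolator query is run against that index; a
        // score above zero means the query matches the document.
        Query registeredQuery = new TermQuery(new Term("message", "bonsai"));
        boolean matches = memoryIndex.search(registeredQuery) > 0.0f;
        System.out.println("query matches document: " + matches);
    }
}
--------------------------------------------------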
- -At percolate time, the document specified in the request gets parsed into a Lucene document and is stored in an in-memory -Lucene index. This in-memory index can hold just this one document and it is optimized for that. Then all the queries -that are registered to the index that the percolate request is targeted at are executed on this single-document -in-memory index. This happens on each shard the percolate request needs to execute on. - -By using the `routing`, `filter` or `query` features, the number of queries that need to be executed can be reduced and thus -the time the percolate API needs to run can be decreased. - -[float] -=== Important Notes - -Because the percolator API processes one document at a time, it doesn't support queries and filters that run -against child documents such as `has_child` and `has_parent`. - -The `inner_hits` feature on the `nested` query isn't supported in the percolate API. - -The `wildcard` and `regexp` queries natively use a lot of memory, and because the percolator keeps the queries in memory -this can easily take up the available memory in the heap space. If possible, try to use a `prefix` query or ngramming to -achieve the same result (with far less memory being used). - -The `delete-by-query` plugin doesn't work to unregister a query; it only deletes the percolate documents from disk. In order -to update the registered queries in memory the index needs to be closed and reopened. - -[float] -=== Forcing Unmapped Fields to be Handled as Strings - -In certain cases it is unknown what kind of percolator queries will get registered, and if no field mapping exists for fields -that are referred to by percolator queries then adding a percolator query fails. This means the mapping needs to be updated -to have the field with the appropriate settings, and then the percolator query can be added. But sometimes it is sufficient -if all unmapped fields are handled as if they were default string fields. In those cases one can configure the -`index.percolator.map_unmapped_fields_as_string` setting to `true` (defaults to `false`), and then if a field referred to in -a percolator query does not exist, it will be handled as a default string field so that adding the percolator query doesn't -fail. diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index 15f23e6fe1e..f0bf03985b0 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -67,13 +67,12 @@ There are added features when using the `elasticsearch` shell script. The first, which was explained earlier, is the ability to easily run the process either in the foreground or the background. -Another feature is the ability to pass `-D` or getopt long style -configuration parameters directly to the script. When set, all override -anything set using either `JAVA_OPTS` or `ES_JAVA_OPTS`. For example: +Another feature is the ability to pass `-E` configuration parameters +directly to the script.
For example: [source,sh] -------------------------------------------------- -$ bin/elasticsearch -Des.index.refresh_interval=5s --node.name=my-node +$ bin/elasticsearch -Ees.index.refresh_interval=5s -Ees.node.name=my-node -------------------------------------------------- ************************************************************************* diff --git a/docs/reference/setup/configuration.asciidoc b/docs/reference/setup/configuration.asciidoc index 03037207fb0..1a687f15fb9 100644 --- a/docs/reference/setup/configuration.asciidoc +++ b/docs/reference/setup/configuration.asciidoc @@ -43,6 +43,13 @@ using the <> API, with: curl localhost:9200/_nodes/stats/process?pretty -------------------------------------------------- +[float] +[[max-number-of-threads]] +==== Number of threads + +Make sure that the number of threads that the Elasticsearch user can +create is at least 2048. + [float] [[vm-max-map-count]] ==== Virtual memory @@ -252,7 +259,7 @@ command, for example: [source,sh] -------------------------------------------------- -$ elasticsearch -Des.network.host=10.0.0.4 +$ elasticsearch -Ees.network.host=10.0.0.4 -------------------------------------------------- Another option is to set `es.default.` prefix instead of `es.` prefix, @@ -329,7 +336,7 @@ course, the above can also be set as a "collapsed" setting, for example: [source,sh] -------------------------------------------------- -$ elasticsearch -Des.index.refresh_interval=5s +$ elasticsearch -Ees.index.refresh_interval=5s -------------------------------------------------- All of the index level configuration can be found within each diff --git a/docs/reference/setup/rolling_upgrade.asciidoc b/docs/reference/setup/rolling_upgrade.asciidoc index b3c00d337f8..cb9073b558e 100644 --- a/docs/reference/setup/rolling_upgrade.asciidoc +++ b/docs/reference/setup/rolling_upgrade.asciidoc @@ -80,7 +80,7 @@ To upgrade using a zip or compressed tarball: overwrite the `config` or `data` directories. * Either copy the files in the `config` directory from your old installation - to your new installation, or use the `--path.conf` option on the command + to your new installation, or use the `-E path.conf=` option on the command line to point to an external config directory. 
* Either copy the files in the `data` directory from your old installation diff --git a/modules/ingest-grok/src/test/resources/rest-api-spec/test/ingest_grok/10_basic.yaml b/modules/ingest-grok/src/test/resources/rest-api-spec/test/ingest_grok/10_basic.yaml index 5c0cca3772e..ebb310ecf7a 100644 --- a/modules/ingest-grok/src/test/resources/rest-api-spec/test/ingest_grok/10_basic.yaml +++ b/modules/ingest-grok/src/test/resources/rest-api-spec/test/ingest_grok/10_basic.yaml @@ -9,3 +9,18 @@ nodes.info: {} - match: { nodes.$master.modules.0.name: ingest-grok } + - match: { nodes.$master.ingest.processors.0.type: append } + - match: { nodes.$master.ingest.processors.1.type: convert } + - match: { nodes.$master.ingest.processors.2.type: date } + - match: { nodes.$master.ingest.processors.3.type: fail } + - match: { nodes.$master.ingest.processors.4.type: foreach } + - match: { nodes.$master.ingest.processors.5.type: grok } + - match: { nodes.$master.ingest.processors.6.type: gsub } + - match: { nodes.$master.ingest.processors.7.type: join } + - match: { nodes.$master.ingest.processors.8.type: lowercase } + - match: { nodes.$master.ingest.processors.9.type: remove } + - match: { nodes.$master.ingest.processors.10.type: rename } + - match: { nodes.$master.ingest.processors.11.type: set } + - match: { nodes.$master.ingest.processors.12.type: split } + - match: { nodes.$master.ingest.processors.13.type: trim } + - match: { nodes.$master.ingest.processors.14.type: uppercase } diff --git a/modules/lang-expression/licenses/lucene-expressions-5.5.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-5.5.0.jar.sha1 deleted file mode 100644 index 15c992bf460..00000000000 --- a/modules/lang-expression/licenses/lucene-expressions-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4766406a2933ac9df62c49d6619caabb9943aba2 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-6.0.0-snapshot-f0aa4fc.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..5237907f224 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +f36f8010c9fec7342d34bece819c13de5f241135 \ No newline at end of file diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java index 0f56adeea55..3944090cef2 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java @@ -112,14 +112,16 @@ class ExpressionSearchScript implements SearchScript { @Override public void setNextVar(String name, Object value) { - assert(specialValue != null); // this should only be used for the special "_value" variable used in aggregations assert(name.equals("_value")); - if (value instanceof Number) { - specialValue.setValue(((Number)value).doubleValue()); - } else { - throw new ScriptException("Cannot use expression with text variable using " + compiledScript); + // _value isn't used in script if specialValue == null + if (specialValue != null) { + if (value instanceof Number) { + specialValue.setValue(((Number)value).doubleValue()); + } else { + throw new ScriptException("Cannot use expression with text variable using " + compiledScript); + } } } }; diff --git 
a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java index 5246d0dc306..a8856ea78b5 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java @@ -383,7 +383,11 @@ public class MoreExpressionTests extends ESIntegTestCase { .script(new Script("_value * 3", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null))) .addAggregation( AggregationBuilders.stats("double_agg").field("y") - .script(new Script("_value - 1.1", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null))); + .script(new Script("_value - 1.1", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null))) + .addAggregation( + AggregationBuilders.stats("const_agg").field("x") // specifically to test a script w/o _value + .script(new Script("3.0", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null)) + ); SearchResponse rsp = req.get(); assertEquals(3, rsp.getHits().getTotalHits()); @@ -395,6 +399,11 @@ public class MoreExpressionTests extends ESIntegTestCase { stats = rsp.getAggregations().get("double_agg"); assertEquals(0.7, stats.getMax(), 0.0001); assertEquals(0.1, stats.getMin(), 0.0001); + + stats = rsp.getAggregations().get("const_agg"); + assertThat(stats.getMax(), equalTo(3.0)); + assertThat(stats.getMin(), equalTo(3.0)); + assertThat(stats.getAvg(), equalTo(3.0)); } public void testStringSpecialValueVariable() throws Exception { diff --git a/modules/lang-groovy/build.gradle b/modules/lang-groovy/build.gradle index 89444a4e926..884fe8b65ba 100644 --- a/modules/lang-groovy/build.gradle +++ b/modules/lang-groovy/build.gradle @@ -28,8 +28,8 @@ dependencies { integTest { cluster { - systemProperty 'es.script.inline', 'true' - systemProperty 'es.script.indexed', 'true' + setting 'script.inline', 'true' + setting 'script.indexed', 'true' } } @@ -38,6 +38,17 @@ thirdPartyAudit.excludes = [ // for example we do not need ivy, scripts arent allowed to download code 'com.thoughtworks.xstream.XStream', 'groovyjarjarasm.asm.util.Textifiable', + // commons-cli is referenced by groovy, even though they supposedly + // jarjar it. Since we don't use the cli, we don't need the dep. 
+ 'org.apache.commons.cli.CommandLine', + 'org.apache.commons.cli.CommandLineParser', + 'org.apache.commons.cli.GnuParser', + 'org.apache.commons.cli.HelpFormatter', + 'org.apache.commons.cli.Option', + 'org.apache.commons.cli.OptionBuilder', + 'org.apache.commons.cli.Options', + 'org.apache.commons.cli.Parser', + 'org.apache.commons.cli.PosixParser', 'org.apache.ivy.Ivy', 'org.apache.ivy.core.event.IvyListener', 'org.apache.ivy.core.event.download.PrepareDownloadEvent', diff --git a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java index 4d9e7a4b57b..60a8a0c1338 100644 --- a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java +++ b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java @@ -316,7 +316,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri }); } catch (Throwable e) { if (logger.isTraceEnabled()) { - logger.trace("failed to run " + compiledScript, e); + logger.trace("failed to run {}", e, compiledScript); } throw new ScriptException("failed to run " + compiledScript, e); } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java index 2fb0f9f6327..6f83746d4ce 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/EquivalenceTests.java @@ -332,7 +332,7 @@ public class EquivalenceTests extends ESIntegTestCase { createIndex("idx"); final int numDocs = scaledRandomIntBetween(2500, 5000); - logger.info("Indexing [" + numDocs +"] docs"); + logger.info("Indexing [{}] docs", numDocs); List indexingRequests = new ArrayList<>(); for (int i = 0; i < numDocs; ++i) { indexingRequests.add(client().prepareIndex("idx", "type", Integer.toString(i)).setSource("double_value", randomDouble())); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ExtendedStatsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ExtendedStatsTests.java index 7d018adc07f..4642d4662c9 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ExtendedStatsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ExtendedStatsTests.java @@ -26,6 +26,8 @@ import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.groovy.GroovyPlugin; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.missing.Missing; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; @@ -38,6 +40,8 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.missing; +import static 
org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -498,12 +502,48 @@ public class ExtendedStatsTests extends AbstractNumericTestCase { checkUpperLowerBounds(stats, sigma); } + public void testEmptySubAggregation() { + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(terms("value").field("value") + .subAggregation(missing("values").field("values") + .subAggregation(extendedStats("stats").field("value")))) + .execute().actionGet(); + + assertHitCount(searchResponse, 10); + + Terms terms = searchResponse.getAggregations().get("value"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), equalTo(10)); + + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket.getDocCount(), equalTo(1L)); + + Missing missing = bucket.getAggregations().get("values"); + assertThat(missing, notNullValue()); + assertThat(missing.getDocCount(), equalTo(0L)); + + ExtendedStats stats = missing.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getName(), equalTo("stats")); + assertThat(stats.getSumOfSquares(), equalTo(0.0)); + assertThat(stats.getCount(), equalTo(0L)); + assertThat(stats.getSum(), equalTo(0.0)); + assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(Double.isNaN(stats.getStdDeviation()), is(true)); + assertThat(Double.isNaN(stats.getAvg()), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(ExtendedStats.Bounds.UPPER)), is(true)); + assertThat(Double.isNaN(stats.getStdDeviationBound(ExtendedStats.Bounds.LOWER)), is(true)); + } + } + private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception { ShardSearchFailure[] failures = response.getShardFailures(); if (failures.length != expectedFailures) { for (ShardSearchFailure failure : failures) { - logger.error("Shard Failure: {}", failure.reason(), failure.toString()); + logger.error("Shard Failure: {}", failure.getCause(), failure); } fail("Unexpected shard failures!"); } @@ -515,4 +555,4 @@ public class ExtendedStatsTests extends AbstractNumericTestCase { assertThat(stats.getStdDeviationBound(ExtendedStats.Bounds.LOWER), equalTo(stats.getAvg() - (stats.getStdDeviation() * sigma))); } -} \ No newline at end of file +} diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/FunctionScoreTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/FunctionScoreTests.java index fffeabcb807..4689d5fba03 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/FunctionScoreTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/FunctionScoreTests.java @@ -169,14 +169,21 @@ public class FunctionScoreTests extends ESIntegTestCase { } } + /** make sure min_score works if functions is empty, see https://github.com/elastic/elasticsearch/issues/10253 */ public void testWithEmptyFunctions() throws IOException, ExecutionException, InterruptedException { assertAcked(prepareCreate("test")); ensureYellow(); index("test", "testtype", "1", jsonBuilder().startObject().field("text", "test text").endObject()); refresh(); - // make sure that min_score works if functions is empty, see https://github.com/elastic/elasticsearch/issues/10253 
- float termQueryScore = 0.19178301f; + SearchResponse termQuery = client().search( + searchRequest().source( + searchSource().explain(true).query( + termQuery("text", "text")))).get(); + assertSearchResponse(termQuery); + assertThat(termQuery.getHits().totalHits(), equalTo(1L)); + float termQueryScore = termQuery.getHits().getAt(0).getScore(); + for (CombineFunction combineFunction : CombineFunction.values()) { testMinScoreApplied(combineFunction, termQueryScore); } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoShapeIntegrationTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoShapeIntegrationTests.java index 98a23b3e1fd..8a86a0a1fb4 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoShapeIntegrationTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoShapeIntegrationTests.java @@ -76,7 +76,7 @@ public class GeoShapeIntegrationTests extends ESIntegTestCase { // left orientation test IndicesService indicesService = internalCluster().getInstance(IndicesService.class, findNodeName(idxName)); - IndexService indexService = indicesService.indexService(idxName); + IndexService indexService = indicesService.indexService(resolveIndex(idxName)); MappedFieldType fieldType = indexService.mapperService().fullName("location"); assertThat(fieldType, instanceOf(GeoShapeFieldMapper.GeoShapeFieldType.class)); @@ -88,7 +88,7 @@ public class GeoShapeIntegrationTests extends ESIntegTestCase { // right orientation test indicesService = internalCluster().getInstance(IndicesService.class, findNodeName(idxName+"2")); - indexService = indicesService.indexService(idxName+"2"); + indexService = indicesService.indexService(resolveIndex((idxName+"2"))); fieldType = indexService.mapperService().fullName("location"); assertThat(fieldType, instanceOf(GeoShapeFieldMapper.GeoShapeFieldType.class)); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java index 748345fd447..6446f6e0b94 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java @@ -67,10 +67,6 @@ import org.elasticsearch.action.get.MultiGetAction; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.percolate.MultiPercolateAction; -import org.elasticsearch.action.percolate.MultiPercolateRequest; -import org.elasticsearch.action.percolate.PercolateAction; -import org.elasticsearch.action.percolate.PercolateRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; @@ -85,7 +81,6 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; @@ -93,6 +88,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import 
org.elasticsearch.script.groovy.GroovyPlugin; import org.elasticsearch.search.action.SearchTransportService; +import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -370,7 +366,7 @@ public class IndicesRequestTests extends ESIntegTestCase { internalCluster().coordOnlyNodeClient().admin().indices().flush(flushRequest).actionGet(); clearInterceptedActions(); - String[] indices = new IndexNameExpressionResolver(Settings.EMPTY).concreteIndices(client().admin().cluster().prepareState().get().getState(), flushRequest); + String[] indices = new IndexNameExpressionResolver(Settings.EMPTY).concreteIndexNames(client().admin().cluster().prepareState().get().getState(), flushRequest); assertIndicesSubset(Arrays.asList(indices), indexShardActions); } @@ -393,7 +389,7 @@ public class IndicesRequestTests extends ESIntegTestCase { internalCluster().coordOnlyNodeClient().admin().indices().refresh(refreshRequest).actionGet(); clearInterceptedActions(); - String[] indices = new IndexNameExpressionResolver(Settings.EMPTY).concreteIndices(client().admin().cluster().prepareState().get().getState(), refreshRequest); + String[] indices = new IndexNameExpressionResolver(Settings.EMPTY).concreteIndexNames(client().admin().cluster().prepareState().get().getState(), refreshRequest); assertIndicesSubset(Arrays.asList(indices), indexShardActions); } @@ -445,7 +441,7 @@ public class IndicesRequestTests extends ESIntegTestCase { String suggestAction = SuggestAction.NAME + "[s]"; interceptTransportActions(suggestAction); - SuggestRequest suggestRequest = new SuggestRequest(randomIndicesOrAliases()); + SuggestRequest suggestRequest = new SuggestRequest(randomIndicesOrAliases()).suggest(new SuggestBuilder()); internalCluster().coordOnlyNodeClient().suggest(suggestRequest).actionGet(); clearInterceptedActions(); @@ -463,51 +459,6 @@ public class IndicesRequestTests extends ESIntegTestCase { assertSameIndices(validateQueryRequest, validateQueryShardAction); } - public void testPercolate() { - String percolateShardAction = PercolateAction.NAME + "[s]"; - interceptTransportActions(percolateShardAction); - - client().prepareIndex("test-get", "type", "1").setSource("field","value").get(); - - PercolateRequest percolateRequest = new PercolateRequest().indices(randomIndicesOrAliases()).documentType("type"); - if (randomBoolean()) { - percolateRequest.getRequest(new GetRequest("test-get", "type", "1")); - } else { - percolateRequest.source("\"field\":\"value\""); - } - internalCluster().coordOnlyNodeClient().percolate(percolateRequest).actionGet(); - - clearInterceptedActions(); - assertSameIndices(percolateRequest, percolateShardAction); - } - - public void testMultiPercolate() { - String multiPercolateShardAction = MultiPercolateAction.NAME + "[shard][s]"; - interceptTransportActions(multiPercolateShardAction); - - client().prepareIndex("test-get", "type", "1").setSource("field", "value").get(); - - MultiPercolateRequest multiPercolateRequest = new MultiPercolateRequest(); - List indices = new ArrayList<>(); - int numRequests = iterations(1, 30); - for (int i = 0; i < numRequests; i++) { - String[] indicesOrAliases = randomIndicesOrAliases(); - Collections.addAll(indices, indicesOrAliases); - PercolateRequest percolateRequest = new PercolateRequest().indices(indicesOrAliases).documentType("type"); - if (randomBoolean()) { - percolateRequest.getRequest(new GetRequest("test-get", "type", 
"1")); - } else { - percolateRequest.source("\"field\":\"value\""); - } - multiPercolateRequest.add(percolateRequest); - } - - internalCluster().coordOnlyNodeClient().multiPercolate(multiPercolateRequest).actionGet(); - - clearInterceptedActions(); - assertIndicesSubset(indices, multiPercolateShardAction); - } - public void testOpenIndex() { interceptTransportActions(OpenIndexAction.NAME); @@ -785,8 +736,8 @@ public class IndicesRequestTests extends ESIntegTestCase { private final Map> requests = new HashMap<>(); @Inject - public InterceptingTransportService(Settings settings, Transport transport, ThreadPool threadPool, NamedWriteableRegistry namedWriteableRegistry) { - super(settings, transport, threadPool, namedWriteableRegistry); + public InterceptingTransportService(Settings settings, Transport transport, ThreadPool threadPool) { + super(settings, transport, threadPool); } synchronized List consumeRequests(String action) { diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/RandomScoreFunctionTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/RandomScoreFunctionTests.java index 67f7d6ff0da..592fb362bc2 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/RandomScoreFunctionTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/RandomScoreFunctionTests.java @@ -316,10 +316,9 @@ public class RandomScoreFunctionTests extends ESIntegTestCase { } } - System.out.println(); - System.out.println("max repeat: " + maxRepeat); - System.out.println("avg repeat: " + sumRepeat / (double) filled); - System.out.println("distribution: " + filled / (double) count); + logger.info("max repeat: {}", maxRepeat); + logger.info("avg repeat: {}", sumRepeat / (double) filled); + logger.info("distribution: {}", filled / (double) count); int percentile50 = filled / 2; int percentile25 = (filled / 4); @@ -333,18 +332,18 @@ public class RandomScoreFunctionTests extends ESIntegTestCase { } sum += i * matrix[i]; if (percentile50 == 0) { - System.out.println("median: " + i); + logger.info("median: {}", i); } else if (percentile25 == 0) { - System.out.println("percentile_25: " + i); + logger.info("percentile_25: {}", i); } else if (percentile75 == 0) { - System.out.println("percentile_75: " + i); + logger.info("percentile_75: {}", i); } percentile50--; percentile25--; percentile75--; } - System.out.println("mean: " + sum / (double) count); + logger.info("mean: {}", sum / (double) count); } } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ScriptQuerySearchTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ScriptQuerySearchTests.java index 752165902ed..aa47fe98bb3 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ScriptQuerySearchTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ScriptQuerySearchTests.java @@ -50,8 +50,8 @@ public class ScriptQuerySearchTests extends ESIntegTestCase { } @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) + public Settings indexSettings() { + return Settings.settingsBuilder().put(super.indexSettings()) // aggressive filter caching so that we can assert on the number of iterations of the script filters .put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), IndexModule.INDEX_QUERY_CACHE) .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true) @@ -80,9 +80,9 @@ public class 
ScriptQuerySearchTests extends ESIntegTestCase { assertThat(response.getHits().totalHits(), equalTo(2L)); assertThat(response.getHits().getAt(0).id(), equalTo("2")); - assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(2.0)); assertThat(response.getHits().getAt(1).id(), equalTo("3")); - assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(3.0)); + assertThat(response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(3.0)); Map params = new HashMap<>(); params.put("param1", 2); @@ -95,7 +95,7 @@ public class ScriptQuerySearchTests extends ESIntegTestCase { assertThat(response.getHits().totalHits(), equalTo(1L)); assertThat(response.getHits().getAt(0).id(), equalTo("3")); - assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(3.0)); + assertThat(response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(3.0)); params = new HashMap<>(); params.put("param1", -1); @@ -108,11 +108,11 @@ public class ScriptQuerySearchTests extends ESIntegTestCase { assertThat(response.getHits().totalHits(), equalTo(3L)); assertThat(response.getHits().getAt(0).id(), equalTo("1")); - assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(1.0)); + assertThat(response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(1.0)); assertThat(response.getHits().getAt(1).id(), equalTo("2")); - assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(2.0)); + assertThat(response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(2.0)); assertThat(response.getHits().getAt(2).id(), equalTo("3")); - assertThat((Double) response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(3.0)); + assertThat(response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(3.0)); } private static AtomicInteger scriptCounter = new AtomicInteger(0); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchStatsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchStatsTests.java index d2dbd997402..95a2691d1c4 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchStatsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchStatsTests.java @@ -116,7 +116,7 @@ public class SearchStatsTests extends ESIntegTestCase { } IndicesStatsResponse indicesStats = client().admin().indices().prepareStats().execute().actionGet(); - logger.debug("###### indices search stats: " + indicesStats.getTotal().getSearch()); + logger.debug("###### indices search stats: {}", indicesStats.getTotal().getSearch()); assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryCount(), greaterThan(0L)); assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryTimeInMillis(), greaterThan(0L)); assertThat(indicesStats.getTotal().getSearch().getTotal().getFetchCount(), greaterThan(0L)); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java index 8cb8ebcc0a4..6f1e49d235b 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java +++ 
b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.groovy.GroovyPlugin; import org.elasticsearch.search.sort.ScriptSortBuilder; +import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; @@ -109,7 +110,7 @@ public class SimpleSortTests extends ESIntegTestCase { SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) .setSize(size) - .addSort(new ScriptSortBuilder(new Script("doc['str_value'].value"), "string")).execute().actionGet(); + .addSort(new ScriptSortBuilder(new Script("doc['str_value'].value"), ScriptSortType.STRING)).execute().actionGet(); assertHitCount(searchResponse, 10); assertThat(searchResponse.getHits().hits().length, equalTo(size)); for (int i = 0; i < size; i++) { @@ -217,7 +218,7 @@ public class SimpleSortTests extends ESIntegTestCase { assertThat(searchResponse.getHits().getTotalHits(), equalTo(20L)); for (int i = 0; i < 10; i++) { - assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Double) searchResponse.getHits().getAt(i).field("min").value(), closeTo((double) i, TOLERANCE)); + assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Double) searchResponse.getHits().getAt(i).field("min").value(), closeTo(i, TOLERANCE)); } } @@ -326,7 +327,7 @@ public class SimpleSortTests extends ESIntegTestCase { } refresh(); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.scriptSort(new Script("\u0027\u0027"), "string")).setSize(10).execute().actionGet(); + .addSort(SortBuilders.scriptSort(new Script("\u0027\u0027"), ScriptSortType.STRING)).setSize(10).execute().actionGet(); assertNoFailures(searchResponse); } } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StatsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StatsTests.java index 4934d2ae6c4..b06d3395b2b 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StatsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StatsTests.java @@ -404,10 +404,10 @@ public class StatsTests extends AbstractNumericTestCase { ShardSearchFailure[] failures = response.getShardFailures(); if (failures.length != expectedFailures) { for (ShardSearchFailure failure : failures) { - logger.error("Shard Failure: {}", failure.reason(), failure.toString()); + logger.error("Shard Failure: {}", failure.getCause(), failure); } fail("Unexpected shard failures!"); } assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards())); } -} \ No newline at end of file +} diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StringTermsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StringTermsTests.java index edf8be49ddc..346d19d4ce5 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StringTermsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StringTermsTests.java @@ -243,7 +243,7 @@ public class StringTermsTests extends AbstractTermsTestCase { ExecutionMode[] executionModes = new ExecutionMode[] { null, ExecutionMode.GLOBAL_ORDINALS, 
ExecutionMode.GLOBAL_ORDINALS_HASH, ExecutionMode.GLOBAL_ORDINALS_LOW_CARDINALITY }; for (ExecutionMode executionMode : executionModes) { - logger.info("Execution mode:" + executionMode); + logger.info("Execution mode: {}", executionMode); SearchResponse response = client() .prepareSearch("idx") .setTypes("type") diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java index dcc3abf2e1c..4e13219bae5 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.test.ESIntegTestCase; @@ -68,7 +69,8 @@ public class GroovyScriptTests extends ESIntegTestCase { public void assertScript(String scriptString) { Script script = new Script(scriptString, ScriptType.INLINE, "groovy", null); SearchResponse resp = client().prepareSearch("test") - .setSource(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).sort(SortBuilders.scriptSort(script, "number"))) + .setSource(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).sort(SortBuilders. + scriptSort(script, ScriptSortType.NUMBER))) .get(); assertNoFailures(resp); } diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index f41ffb90128..8eed31dd668 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -28,7 +28,7 @@ dependencies { integTest { cluster { - systemProperty 'es.script.inline', 'true' - systemProperty 'es.script.indexed', 'true' + setting 'script.inline', 'true' + setting 'script.indexed', 'true' } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java index 766c5bff9c4..647a727b2dd 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java @@ -191,7 +191,7 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme } }); } catch (Exception e) { - logger.error("Error running " + template, e); + logger.error("Error running {}", e, template); throw new ScriptException("Error running " + template, e); } return result.bytes(); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/SuggestSearchTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/SuggestSearchTests.java index f17bb7ea59f..94f60f8802c 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/SuggestSearchTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/SuggestSearchTests.java @@ -26,7 +26,6 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static 
org.elasticsearch.search.suggest.SuggestBuilders.phraseSuggestion; import static org.elasticsearch.search.suggest.SuggestBuilders.termSuggestion; -import static org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.candidateGenerator; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestion; @@ -50,6 +49,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.concurrent.ExecutionException; import org.elasticsearch.ElasticsearchException; @@ -67,12 +67,17 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.mustache.MustachePlugin; +import org.elasticsearch.search.suggest.SortBy; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.SuggestBuilder; -import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder; +import org.elasticsearch.search.suggest.SuggestionBuilder; import org.elasticsearch.search.suggest.phrase.DirectCandidateGeneratorBuilder; +import org.elasticsearch.search.suggest.phrase.Laplace; +import org.elasticsearch.search.suggest.phrase.LinearInterpolation; import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder; +import org.elasticsearch.search.suggest.phrase.StupidBackoff; import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; +import org.elasticsearch.search.suggest.term.TermSuggestionBuilder.SuggestMode; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; @@ -99,12 +104,11 @@ public class SuggestSearchTests extends ESIntegTestCase { index("test", "type1", "4", "text", "abcc"); refresh(); - TermSuggestionBuilder termSuggest = termSuggestion("test") - .suggestMode("always") // Always, otherwise the results can vary between requests. - .text("abcd") - .field("text"); + TermSuggestionBuilder termSuggest = termSuggestion("text") + .suggestMode(TermSuggestionBuilder.SuggestMode.ALWAYS) // Always, otherwise the results can vary between requests. + .text("abcd"); logger.info("--> run suggestions with one index"); - searchSuggest( termSuggest); + searchSuggest("test", termSuggest); createIndex("test_1"); ensureGreen(); @@ -113,13 +117,12 @@ public class SuggestSearchTests extends ESIntegTestCase { index("test_1", "type1", "3", "text", "ab bd"); index("test_1", "type1", "4", "text", "ab cc"); refresh(); - termSuggest = termSuggestion("test") - .suggestMode("always") // Always, otherwise the results can vary between requests. + termSuggest = termSuggestion("text") + .suggestMode(SuggestMode.ALWAYS) // Always, otherwise the results can vary between requests. .text("ab cd") - .minWordLength(1) - .field("text"); + .minWordLength(1); logger.info("--> run suggestions with two indices"); - searchSuggest( termSuggest); + searchSuggest("test", termSuggest); XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") @@ -140,14 +143,13 @@ public class SuggestSearchTests extends ESIntegTestCase { index("test_2", "type1", "4", "text", "abcc"); refresh(); - termSuggest = termSuggestion("test") - .suggestMode("always") // Always, otherwise the results can vary between requests. 
+ termSuggest = termSuggestion("text") + .suggestMode(SuggestMode.ALWAYS) // Always, otherwise the results can vary between requests. .text("ab cd") - .minWordLength(1) - .field("text"); + .minWordLength(1); logger.info("--> run suggestions with three indices"); try { - searchSuggest( termSuggest); + searchSuggest("test", termSuggest); fail(" can not suggest across multiple indices with different analysis chains"); } catch (ReduceSearchPhaseException ex) { assertThat(ex.getCause(), instanceOf(IllegalStateException.class)); @@ -160,14 +162,13 @@ public class SuggestSearchTests extends ESIntegTestCase { } - termSuggest = termSuggestion("test") - .suggestMode("always") // Always, otherwise the results can vary between requests. + termSuggest = termSuggestion("text") + .suggestMode(SuggestMode.ALWAYS) // Always, otherwise the results can vary between requests. .text("ABCD") - .minWordLength(1) - .field("text"); + .minWordLength(1); logger.info("--> run suggestions with four indices"); try { - searchSuggest( termSuggest); + searchSuggest("test", termSuggest); fail(" can not suggest across multiple indices with different analysis chains"); } catch (ReduceSearchPhaseException ex) { assertThat(ex.getCause(), instanceOf(IllegalStateException.class)); @@ -214,17 +215,27 @@ public class SuggestSearchTests extends ESIntegTestCase { refresh(); DirectCandidateGeneratorBuilder generator = candidateGenerator("name").prefixLength(0).minWordLength(0).suggestMode("always").maxEdits(2); - PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("did_you_mean").field("name.shingled") + PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("name.shingled") .addCandidateGenerator(generator) .gramSize(3); - Suggest searchSuggest = searchSuggest( "ice tea", phraseSuggestion); + Suggest searchSuggest = searchSuggest("ice tea", "did_you_mean", phraseSuggestion); assertSuggestion(searchSuggest, 0, "did_you_mean", "iced tea"); generator.suggestMode(null); - searchSuggest = searchSuggest( "ice tea", phraseSuggestion); + searchSuggest = searchSuggest( "ice tea", "did_you_mean", phraseSuggestion); assertSuggestionSize(searchSuggest, 0, 0, "did_you_mean"); } + /** + * Creates a new {@link DirectCandidateGeneratorBuilder} + * + * @param field + * the field this candidate generator operates on. + */ + private DirectCandidateGeneratorBuilder candidateGenerator(String field) { + return new DirectCandidateGeneratorBuilder(field); + } + // see #2729 public void testSizeOneShard() throws Exception { prepareCreate("test").setSettings( @@ -240,16 +251,15 @@ public class SuggestSearchTests extends ESIntegTestCase { SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellchecker")).get(); assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue()); - TermSuggestionBuilder termSuggestion = termSuggestion("test") - .suggestMode("always") // Always, otherwise the results can vary between requests. + TermSuggestionBuilder termSuggestion = termSuggestion("text") + .suggestMode(SuggestMode.ALWAYS) // Always, otherwise the results can vary between requests. 
.text("abcd") - .field("text") .size(10); - Suggest suggest = searchSuggest( termSuggestion); + Suggest suggest = searchSuggest("test", termSuggestion); assertSuggestion(suggest, 0, "test", 10, "abc0"); termSuggestion.text("abcd").shardSize(5); - suggest = searchSuggest( termSuggestion); + suggest = searchSuggest("test", termSuggestion); assertSuggestion(suggest, 0, "test", 5, "abc0"); } @@ -283,21 +293,23 @@ public class SuggestSearchTests extends ESIntegTestCase { client().prepareIndex("test", "type1").setSource("name", "I like ice cream.")); refresh(); - PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("did_you_mean").field("name.shingled") - .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("name").prefixLength(0).minWordLength(0).suggestMode("always").maxEdits(2)) + PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("name.shingled") + .addCandidateGenerator(candidateGenerator("name").prefixLength(0).minWordLength(0).suggestMode("always").maxEdits(2)) .gramSize(3); - Suggest searchSuggest = searchSuggest( "ice tea", phraseSuggestion); + Suggest searchSuggest = searchSuggest("ice tea", "did_you_mean", phraseSuggestion); assertSuggestion(searchSuggest, 0, 0, "did_you_mean", "iced tea"); - phraseSuggestion.field("nosuchField"); + phraseSuggestion = phraseSuggestion("nosuchField") + .addCandidateGenerator(candidateGenerator("name").prefixLength(0).minWordLength(0).suggestMode("always").maxEdits(2)) + .gramSize(3); { SearchRequestBuilder searchBuilder = client().prepareSearch().setSize(0); - searchBuilder.suggest(new SuggestBuilder().setText("tetsting sugestion").addSuggestion(phraseSuggestion)); + searchBuilder.suggest(new SuggestBuilder().setGlobalText("tetsting sugestion").addSuggestion("did_you_mean", phraseSuggestion)); assertThrows(searchBuilder, SearchPhaseExecutionException.class); } { SearchRequestBuilder searchBuilder = client().prepareSearch().setSize(0); - searchBuilder.suggest(new SuggestBuilder().setText("tetsting sugestion").addSuggestion(phraseSuggestion)); + searchBuilder.suggest(new SuggestBuilder().setGlobalText("tetsting sugestion").addSuggestion("did_you_mean", phraseSuggestion)); assertThrows(searchBuilder, SearchPhaseExecutionException.class); } } @@ -315,15 +327,14 @@ public class SuggestSearchTests extends ESIntegTestCase { SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellcecker")).get(); assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue()); - TermSuggestionBuilder termSuggest = termSuggestion("test") - .suggestMode("always") // Always, otherwise the results can vary between requests. - .text("abcd") - .field("text"); - Suggest suggest = searchSuggest( termSuggest); + TermSuggestionBuilder termSuggest = termSuggestion("text") + .suggestMode(SuggestMode.ALWAYS) // Always, otherwise the results can vary between requests. 
+ .text("abcd"); + Suggest suggest = searchSuggest("test", termSuggest); assertSuggestion(suggest, 0, "test", "aacd", "abbd", "abcc"); assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd")); - suggest = searchSuggest( termSuggest); + suggest = searchSuggest("test", termSuggest); assertSuggestion(suggest, 0, "test", "aacd","abbd", "abcc"); assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd")); } @@ -332,18 +343,17 @@ public class SuggestSearchTests extends ESIntegTestCase { createIndex("test"); ensureGreen(); - index("test", "type1", "1", "foo", "bar"); + index("test", "type1", "1", "text", "bar"); refresh(); - TermSuggestionBuilder termSuggest = termSuggestion("test") - .suggestMode("always") // Always, otherwise the results can vary between requests. - .text("abcd") - .field("text"); - Suggest suggest = searchSuggest( termSuggest); + TermSuggestionBuilder termSuggest = termSuggestion("text") + .suggestMode(SuggestMode.ALWAYS) // Always, otherwise the results can vary between requests. + .text("abcd"); + Suggest suggest = searchSuggest("test", termSuggest); assertSuggestionSize(suggest, 0, 0, "test"); assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd")); - suggest = searchSuggest( termSuggest); + suggest = searchSuggest("test", termSuggest); assertSuggestionSize(suggest, 0, 0, "test"); assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd")); } @@ -358,16 +368,17 @@ public class SuggestSearchTests extends ESIntegTestCase { index("test", "typ1", "4", "field1", "prefix_abcc", "field2", "prefix_eggg"); refresh(); - Suggest suggest = searchSuggest( - termSuggestion("size1") - .size(1).text("prefix_abcd").maxTermFreq(10).prefixLength(1).minDocFreq(0) - .field("field1").suggestMode("always"), - termSuggestion("field2") - .field("field2").text("prefix_eeeh prefix_efgh") - .maxTermFreq(10).minDocFreq(0).suggestMode("always"), - termSuggestion("accuracy") - .field("field2").text("prefix_efgh").setAccuracy(1f) - .maxTermFreq(10).minDocFreq(0).suggestMode("always")); + Map> suggestions = new HashMap<>(); + suggestions.put("size1", termSuggestion("field1") + .size(1).text("prefix_abcd").maxTermFreq(10).prefixLength(1).minDocFreq(0) + .suggestMode(SuggestMode.ALWAYS)); + suggestions.put("field2", termSuggestion("field2") + .text("prefix_eeeh prefix_efgh") + .maxTermFreq(10).minDocFreq(0).suggestMode(SuggestMode.ALWAYS)); + suggestions.put("accuracy", termSuggestion("field2") + .text("prefix_efgh").accuracy(1f) + .maxTermFreq(10).minDocFreq(0).suggestMode(SuggestMode.ALWAYS)); + Suggest suggest = searchSuggest(null, 0, suggestions); assertSuggestion(suggest, 0, "size1", "prefix_aacd"); assertThat(suggest.getSuggestion("field2").getEntries().get(0).getText().string(), equalTo("prefix_eeeh")); assertSuggestion(suggest, 0, "field2", "prefix_efgh"); @@ -401,17 +412,18 @@ public class SuggestSearchTests extends ESIntegTestCase { } refresh(); - Suggest suggest = searchSuggest( "prefix_abcd", - termSuggestion("size3SortScoreFirst") - .size(3).minDocFreq(0).field("field1").suggestMode("always"), - termSuggestion("size10SortScoreFirst") - .size(10).minDocFreq(0).field("field1").suggestMode("always").shardSize(50), - termSuggestion("size3SortScoreFirstMaxEdits1") - .maxEdits(1) - .size(10).minDocFreq(0).field("field1").suggestMode("always"), - termSuggestion("size10SortFrequencyFirst") - .size(10).sort("frequency").shardSize(1000) - 
.minDocFreq(0).field("field1").suggestMode("always")); + Map> suggestions = new HashMap<>(); + suggestions.put("size3SortScoreFirst", termSuggestion("field1") + .size(3).minDocFreq(0).suggestMode(SuggestMode.ALWAYS)); + suggestions.put("size10SortScoreFirst", termSuggestion("field1") + .size(10).minDocFreq(0).suggestMode(SuggestMode.ALWAYS).shardSize(50)); + suggestions.put("size3SortScoreFirstMaxEdits1", termSuggestion("field1") + .maxEdits(1) + .size(10).minDocFreq(0).suggestMode(SuggestMode.ALWAYS)); + suggestions.put("size10SortFrequencyFirst", termSuggestion("field1") + .size(10).sort(SortBy.FREQUENCY).shardSize(1000) + .minDocFreq(0).suggestMode(SuggestMode.ALWAYS)); + Suggest suggest = searchSuggest("prefix_abcd", 0, suggestions); // The commented out assertions fail sometimes because suggestions are based off of shard frequencies instead of index frequencies. assertSuggestion(suggest, 0, "size3SortScoreFirst", "prefix_aacd", "prefix_abcc", "prefix_accd"); @@ -435,9 +447,9 @@ public class SuggestSearchTests extends ESIntegTestCase { index("test", "typ1", "1", "body", "this is a test"); refresh(); - Suggest searchSuggest = searchSuggest( "a an the", - phraseSuggestion("simple_phrase").field("body").gramSize(1) - .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").minWordLength(1).suggestMode("always")) + Suggest searchSuggest = searchSuggest( "a an the", "simple_phrase", + phraseSuggestion("body").gramSize(1) + .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always")) .size(1)); assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase"); } @@ -471,15 +483,15 @@ public class SuggestSearchTests extends ESIntegTestCase { index("test", "type1", "3", "body", "hello words"); refresh(); - Suggest searchSuggest = searchSuggest( "hello word", - phraseSuggestion("simple_phrase").field("body") - .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").prefixLength(4).minWordLength(1).suggestMode("always")) + Suggest searchSuggest = searchSuggest( "hello word", "simple_phrase", + phraseSuggestion("body") + .addCandidateGenerator(candidateGenerator("body").prefixLength(4).minWordLength(1).suggestMode("always")) .size(1).confidence(1.0f)); assertSuggestion(searchSuggest, 0, "simple_phrase", "hello words"); - searchSuggest = searchSuggest( "hello word", - phraseSuggestion("simple_phrase").field("body") - .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").prefixLength(2).minWordLength(1).suggestMode("always")) + searchSuggest = searchSuggest( "hello word", "simple_phrase", + phraseSuggestion("body") + .addCandidateGenerator(candidateGenerator("body").prefixLength(2).minWordLength(1).suggestMode("always")) .size(1).confidence(1.0f)); assertSuggestion(searchSuggest, 0, "simple_phrase", "hello world"); } @@ -526,88 +538,87 @@ public class SuggestSearchTests extends ESIntegTestCase { } refresh(); - PhraseSuggestionBuilder phraseSuggest = phraseSuggestion("simple_phrase") - .field("bigram").gramSize(2).analyzer("body") + PhraseSuggestionBuilder phraseSuggest = phraseSuggestion("bigram").gramSize(2).analyzer("body") .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always")) .size(1); - Suggest searchSuggest = searchSuggest( "american ame", phraseSuggest); + Suggest searchSuggest = searchSuggest( "american ame", "simple_phrase", phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "american ace"); 
assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getText().string(), equalTo("american ame")); phraseSuggest.realWordErrorLikelihood(0.95f); - searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest); + searchSuggest = searchSuggest( "Xor the Got-Jewel", "simple_phrase", phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel"); // Check the "text" field this one time. assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getText().string(), equalTo("Xor the Got-Jewel")); // Ask for highlighting phraseSuggest.highlight("", ""); - searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest); + searchSuggest = searchSuggest( "Xor the Got-Jewel", "simple_phrase", phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel"); assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getOptions().get(0).getHighlighted().string(), equalTo("xorr the god jewel")); // pass in a correct phrase phraseSuggest.highlight(null, null).confidence(0f).size(1).maxErrors(0.5f); - searchSuggest = searchSuggest( "Xorr the God-Jewel", phraseSuggest); + searchSuggest = searchSuggest( "Xorr the God-Jewel", "simple_phrase", phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel"); // pass in a correct phrase - set confidence to 2 phraseSuggest.confidence(2f); - searchSuggest = searchSuggest( "Xorr the God-Jewel", phraseSuggest); + searchSuggest = searchSuggest( "Xorr the God-Jewel", "simple_phrase", phraseSuggest); assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase"); // pass in a correct phrase - set confidence to 0.99 phraseSuggest.confidence(0.99f); - searchSuggest = searchSuggest( "Xorr the God-Jewel", phraseSuggest); + searchSuggest = searchSuggest( "Xorr the God-Jewel", "simple_phrase", phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel"); //test reverse suggestions with pre & post filter phraseSuggest .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always")) .addCandidateGenerator(candidateGenerator("body_reverse").minWordLength(1).suggestMode("always").preFilter("reverse").postFilter("reverse")); - searchSuggest = searchSuggest( "xor the yod-Jewel", phraseSuggest); + searchSuggest = searchSuggest( "xor the yod-Jewel", "simple_phrase", phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel"); // set all mass to trigrams (not indexed) phraseSuggest.clearCandidateGenerators() .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always")) - .smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(1,0,0)); - searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest); + .smoothingModel(new LinearInterpolation(1,0,0)); + searchSuggest = searchSuggest( "Xor the Got-Jewel", "simple_phrase", phraseSuggest); assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase"); // set all mass to bigrams - phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(0,1,0)); - searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest); + phraseSuggest.smoothingModel(new LinearInterpolation(0,1,0)); + searchSuggest = searchSuggest( "Xor the Got-Jewel", "simple_phrase", phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel"); // distribute mass - phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(0.4,0.4,0.2)); - searchSuggest = 
searchSuggest( "Xor the Got-Jewel", phraseSuggest); + phraseSuggest.smoothingModel(new LinearInterpolation(0.4,0.4,0.2)); + searchSuggest = searchSuggest( "Xor the Got-Jewel", "simple_phrase", phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel"); - searchSuggest = searchSuggest( "american ame", phraseSuggest); + searchSuggest = searchSuggest( "american ame", "simple_phrase", phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "american ace"); // try all smoothing methods - phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(0.4,0.4,0.2)); - searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest); + phraseSuggest.smoothingModel(new LinearInterpolation(0.4,0.4,0.2)); + searchSuggest = searchSuggest( "Xor the Got-Jewel", "simple_phrase", phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel"); - phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.Laplace(0.2)); - searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest); + phraseSuggest.smoothingModel(new Laplace(0.2)); + searchSuggest = searchSuggest( "Xor the Got-Jewel", "simple_phrase", phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel"); - phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.StupidBackoff(0.1)); - searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest); + phraseSuggest.smoothingModel(new StupidBackoff(0.1)); + searchSuggest = searchSuggest( "Xor the Got-Jewel", "simple_phrase",phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel"); // check tokenLimit phraseSuggest.smoothingModel(null).tokenLimit(4); - searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest); + searchSuggest = searchSuggest( "Xor the Got-Jewel", "simple_phrase", phraseSuggest); assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase"); - phraseSuggest.tokenLimit(15).smoothingModel(new PhraseSuggestionBuilder.StupidBackoff(0.1)); - searchSuggest = searchSuggest( "Xor the Got-Jewel Xor the Got-Jewel Xor the Got-Jewel", phraseSuggest); + phraseSuggest.tokenLimit(15).smoothingModel(new StupidBackoff(0.1)); + searchSuggest = searchSuggest( "Xor the Got-Jewel Xor the Got-Jewel Xor the Got-Jewel", "simple_phrase", phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel xorr the god jewel xorr the god jewel"); // Check the name this time because we're repeating it which is funky assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getText().string(), equalTo("Xor the Got-Jewel Xor the Got-Jewel Xor the Got-Jewel")); @@ -663,22 +674,21 @@ public class SuggestSearchTests extends ESIntegTestCase { index("test", "type1", "2", "body", line, "body_reverse", line, "bigram", line); refresh(); - PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("simple_phrase") + PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("bigram") .realWordErrorLikelihood(0.95f) - .field("bigram") .gramSize(2) .analyzer("body") .addCandidateGenerator(candidateGenerator("body").minWordLength(1).prefixLength(1).suggestMode("always").size(1).accuracy(0.1f)) - .smoothingModel(new PhraseSuggestionBuilder.StupidBackoff(0.1)) + .smoothingModel(new StupidBackoff(0.1)) .maxErrors(1.0f) .size(5); - Suggest searchSuggest = searchSuggest( "Xorr the Gut-Jewel", phraseSuggestion); + Suggest searchSuggest = searchSuggest("Xorr the Gut-Jewel", "simple_phrase", phraseSuggestion); 
assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase"); // we allow a size of 2 now on the shard generator level so "god" will be found since it's LD2 phraseSuggestion.clearCandidateGenerators() .addCandidateGenerator(candidateGenerator("body").minWordLength(1).prefixLength(1).suggestMode("always").size(2).accuracy(0.1f)); - searchSuggest = searchSuggest( "Xorr the Gut-Jewel", phraseSuggestion); + searchSuggest = searchSuggest( "Xorr the Gut-Jewel", "simple_phrase", phraseSuggestion); assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel"); } @@ -723,51 +733,56 @@ public class SuggestSearchTests extends ESIntegTestCase { NumShards numShards = getNumShards("test"); // Lets make sure some things throw exceptions - PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("simple_phrase") - .field("bigram") + PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("bigram") .analyzer("body") .addCandidateGenerator(candidateGenerator("does_not_exist").minWordLength(1).suggestMode("always")) .realWordErrorLikelihood(0.95f) .maxErrors(0.5f) .size(1); + Map<String, SuggestionBuilder<?>> suggestion = new HashMap<>(); + suggestion.put("simple_phrase", phraseSuggestion); try { - searchSuggest( "Xor the Got-Jewel", numShards.numPrimaries, phraseSuggestion); + searchSuggest("Xor the Got-Jewel", numShards.numPrimaries, suggestion); fail("field does not exists"); } catch (SearchPhaseExecutionException e) {} phraseSuggestion.clearCandidateGenerators().analyzer(null); try { - searchSuggest( "Xor the Got-Jewel", numShards.numPrimaries, phraseSuggestion); + searchSuggest("Xor the Got-Jewel", numShards.numPrimaries, suggestion); fail("analyzer does only produce ngrams"); } catch (SearchPhaseExecutionException e) { } phraseSuggestion.analyzer("bigram"); try { - searchSuggest( "Xor the Got-Jewel", numShards.numPrimaries, phraseSuggestion); + searchSuggest("Xor the Got-Jewel", numShards.numPrimaries, suggestion); fail("analyzer does only produce ngrams"); } catch (SearchPhaseExecutionException e) { } // Now we'll make sure some things don't phraseSuggestion.forceUnigrams(false); - searchSuggest( "Xor the Got-Jewel", phraseSuggestion); + searchSuggest( "Xor the Got-Jewel", 0, suggestion); // Field doesn't produce unigrams but the analyzer does - phraseSuggestion.forceUnigrams(true).field("bigram").analyzer("ngram"); - searchSuggest( "Xor the Got-Jewel", - phraseSuggestion); + phraseSuggestion.forceUnigrams(true).analyzer("ngram"); + searchSuggest( "Xor the Got-Jewel", 0, suggestion); - phraseSuggestion.field("ngram").analyzer("myDefAnalyzer") + phraseSuggestion = phraseSuggestion("ngram") + .analyzer("myDefAnalyzer") + .forceUnigrams(true) + .realWordErrorLikelihood(0.95f) + .maxErrors(0.5f) + .size(1) .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always")); - Suggest suggest = searchSuggest( "Xor the Got-Jewel", phraseSuggestion); + Suggest suggest = searchSuggest( "Xor the Got-Jewel", 0, suggestion); // "xorr the god jewel" and and "xorn the god jewel" have identical scores (we are only using unigrams to score), so we tie break by // earlier term (xorn): assertSuggestion(suggest, 0, "simple_phrase", "xorn the god jewel"); phraseSuggestion.analyzer(null); - suggest = searchSuggest( "Xor the Got-Jewel", phraseSuggestion); + suggest = searchSuggest( "Xor the Got-Jewel", 0, suggestion); // In this case xorr has a better score than xorn because we set the field back to the default (my_shingle2) analyzer, so the - probability that the term is not in the dictionary but is NOT a
misspelling is relatively high in this case compared to the @@ -782,9 +797,9 @@ public class SuggestSearchTests extends ESIntegTestCase { client().prepareIndex("test", "type1", "2").setSource("field1", "foobar2").setRouting("2"), client().prepareIndex("test", "type1", "3").setSource("field1", "foobar3").setRouting("3")); - Suggest suggest = searchSuggest( "foobar", - termSuggestion("simple") - .size(10).minDocFreq(0).field("field1").suggestMode("always")); + Suggest suggest = searchSuggest( "foobar", "simple", + termSuggestion("field1") + .size(10).minDocFreq(0).suggestMode(SuggestMode.ALWAYS)); ElasticsearchAssertions.assertSuggestionSize(suggest, 0, 3, "simple"); } @@ -823,15 +838,15 @@ public class SuggestSearchTests extends ESIntegTestCase { // When searching on a shard with a non existing mapping, we should fail SearchRequestBuilder request = client().prepareSearch().setSize(0) .suggest( - new SuggestBuilder().setText("tetsting sugestion").addSuggestion( - phraseSuggestion("did_you_mean").field("fielddoesnotexist").maxErrors(5.0f))); + new SuggestBuilder().setGlobalText("tetsting sugestion").addSuggestion("did_you_mean", + phraseSuggestion("fielddoesnotexist").maxErrors(5.0f))); assertThrows(request, SearchPhaseExecutionException.class); // When searching on a shard which does not hold yet any document of an existing type, we should not fail SearchResponse searchResponse = client().prepareSearch().setSize(0) .suggest( - new SuggestBuilder().setText("tetsting sugestion").addSuggestion( - phraseSuggestion("did_you_mean").field("name").maxErrors(5.0f))) + new SuggestBuilder().setGlobalText("tetsting sugestion").addSuggestion("did_you_mean", + phraseSuggestion("name").maxErrors(5.0f))) .get(); ElasticsearchAssertions.assertNoFailures(searchResponse); ElasticsearchAssertions.assertSuggestion(searchResponse.getSuggest(), 0, 0, "did_you_mean", "testing suggestions"); @@ -869,8 +884,8 @@ public class SuggestSearchTests extends ESIntegTestCase { SearchResponse searchResponse = client().prepareSearch() .setSize(0) .suggest( - new SuggestBuilder().setText("tetsting sugestion").addSuggestion( - phraseSuggestion("did_you_mean").field("name").maxErrors(5.0f))) + new SuggestBuilder().setGlobalText("tetsting sugestion").addSuggestion("did_you_mean", + phraseSuggestion("name").maxErrors(5.0f))) .get(); assertNoFailures(searchResponse); @@ -927,17 +942,15 @@ public class SuggestSearchTests extends ESIntegTestCase { } refresh(); - Suggest searchSuggest = searchSuggest("nobel prize", phraseSuggestion("simple_phrase") - .field("body") - .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").minWordLength(1).suggestMode("always").maxTermFreq(.99f)) + Suggest searchSuggest = searchSuggest("nobel prize", "simple_phrase", phraseSuggestion("body") + .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always").maxTermFreq(.99f)) .confidence(2f) .maxErrors(5f) .size(1)); assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase"); - searchSuggest = searchSuggest("noble prize", phraseSuggestion("simple_phrase") - .field("body") - .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").minWordLength(1).suggestMode("always").maxTermFreq(.99f)) + searchSuggest = searchSuggest("noble prize", "simple_phrase", phraseSuggestion("body") + .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always").maxTermFreq(.99f)) .confidence(2f) .maxErrors(5f) .size(1)); @@ -1067,8 +1080,7 @@ public class SuggestSearchTests extends 
ESIntegTestCase { indexRandom(true, builders); PhraseSuggestionBuilder suggest = phraseSuggestion("title") - .field("title") - .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("title") + .addCandidateGenerator(candidateGenerator("title") .suggestMode("always") .maxTermFreq(.99f) .size(1000) // Setting a silly high size helps of generate a larger list of candidates for testing. @@ -1078,13 +1090,13 @@ public class SuggestSearchTests extends ESIntegTestCase { .maxErrors(2f) .shardSize(30000) .size(30000); - Suggest searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", suggest); + Suggest searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", "title", suggest); assertSuggestion(searchSuggest, 0, 0, "title", "united states house of representatives elections in washington 2006"); assertSuggestionSize(searchSuggest, 0, 25480, "title"); // Just to prove that we've run through a ton of options suggest.size(1); long start = System.currentTimeMillis(); - searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", suggest); + searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", "title", suggest); long total = System.currentTimeMillis() - start; assertSuggestion(searchSuggest, 0, 0, "title", "united states house of representatives elections in washington 2006"); // assertThat(total, lessThan(1000L)); // Takes many seconds without fix - just for debugging @@ -1132,8 +1144,7 @@ public class SuggestSearchTests extends ESIntegTestCase { // suggest without collate PhraseSuggestionBuilder suggest = phraseSuggestion("title") - .field("title") - .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("title") + .addCandidateGenerator(candidateGenerator("title") .suggestMode("always") .maxTermFreq(.99f) .size(10) @@ -1143,7 +1154,7 @@ public class SuggestSearchTests extends ESIntegTestCase { .maxErrors(2f) .shardSize(30000) .size(10); - Suggest searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", suggest); + Suggest searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", "title", suggest); assertSuggestionSize(searchSuggest, 0, 10, "title"); // suggest with collate @@ -1156,11 +1167,11 @@ public class SuggestSearchTests extends ESIntegTestCase { .string(); PhraseSuggestionBuilder filteredQuerySuggest = suggest.collateQuery(filterString); filteredQuerySuggest.collateParams(Collections.singletonMap("field", "title")); - searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", filteredQuerySuggest); + searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", "title", filteredQuerySuggest); assertSuggestionSize(searchSuggest, 0, 2, "title"); // collate suggest with no result (boundary case) - searchSuggest = searchSuggest("Elections of Representatives Parliament", filteredQuerySuggest); + searchSuggest = searchSuggest("Elections of Representatives Parliament", "title", filteredQuerySuggest); assertSuggestionSize(searchSuggest, 0, 0, "title"); NumShards numShards = getNumShards("test"); @@ -1174,8 +1185,10 @@ public class SuggestSearchTests extends ESIntegTestCase { .endObject() .string(); PhraseSuggestionBuilder incorrectFilteredSuggest = suggest.collateQuery(incorrectFilterString); + Map<String, SuggestionBuilder<?>> namedSuggestion = new HashMap<>(); +
namedSuggestion.put("my_title_suggestion", incorrectFilteredSuggest); try { - searchSuggest("united states house of representatives elections in washington 2006", numShards.numPrimaries, incorrectFilteredSuggest); + searchSuggest("united states house of representatives elections in washington 2006", numShards.numPrimaries, namedSuggestion); fail("Post query error has been swallowed"); } catch(ElasticsearchException e) { // expected @@ -1191,7 +1204,7 @@ public class SuggestSearchTests extends ESIntegTestCase { .string(); PhraseSuggestionBuilder filteredFilterSuggest = suggest.collateQuery(filterStringAsFilter); - searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", filteredFilterSuggest); + searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", "title", filteredFilterSuggest); assertSuggestionSize(searchSuggest, 0, 2, "title"); // collate suggest with bad query @@ -1205,7 +1218,7 @@ public class SuggestSearchTests extends ESIntegTestCase { PhraseSuggestionBuilder in = suggest.collateQuery(filterStr); try { - searchSuggest("united states house of representatives elections in washington 2006", numShards.numPrimaries, in); + searchSuggest("united states house of representatives elections in washington 2006", numShards.numPrimaries, namedSuggestion); fail("Post filter error has been swallowed"); } catch(ElasticsearchException e) { //expected @@ -1223,7 +1236,7 @@ public class SuggestSearchTests extends ESIntegTestCase { PhraseSuggestionBuilder phraseSuggestWithNoParams = suggest.collateQuery(collateWithParams); try { - searchSuggest("united states house of representatives elections in washington 2006", numShards.numPrimaries, phraseSuggestWithNoParams); + searchSuggest("united states house of representatives elections in washington 2006", numShards.numPrimaries, namedSuggestion); fail("Malformed query (lack of additional params) should fail"); } catch (ElasticsearchException e) { // expected @@ -1235,33 +1248,35 @@ public class SuggestSearchTests extends ESIntegTestCase { params.put("query_field", "title"); PhraseSuggestionBuilder phraseSuggestWithParams = suggest.collateQuery(collateWithParams).collateParams(params); - searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", phraseSuggestWithParams); + searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", "title", phraseSuggestWithParams); assertSuggestionSize(searchSuggest, 0, 2, "title"); // collate query request with prune set to true PhraseSuggestionBuilder phraseSuggestWithParamsAndReturn = suggest.collateQuery(collateWithParams).collateParams(params).collatePrune(true); - searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", phraseSuggestWithParamsAndReturn); + searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", "title", phraseSuggestWithParamsAndReturn); assertSuggestionSize(searchSuggest, 0, 10, "title"); assertSuggestionPhraseCollateMatchExists(searchSuggest, "title", 2); } - protected Suggest searchSuggest(SuggestionBuilder... suggestion) { - return searchSuggest(null, suggestion); + protected Suggest searchSuggest(String name, SuggestionBuilder suggestion) { + return searchSuggest(null, name, suggestion); } - protected Suggest searchSuggest(String suggestText, SuggestionBuilder... 
suggestions) { - return searchSuggest(suggestText, 0, suggestions); + protected Suggest searchSuggest(String suggestText, String name, SuggestionBuilder suggestion) { + Map> map = new HashMap<>(); + map.put(name, suggestion); + return searchSuggest(suggestText, 0, map); } - protected Suggest searchSuggest(String suggestText, int expectShardsFailed, SuggestionBuilder... suggestions) { + protected Suggest searchSuggest(String suggestText, int expectShardsFailed, Map> suggestions) { if (randomBoolean()) { SearchRequestBuilder builder = client().prepareSearch().setSize(0); SuggestBuilder suggestBuilder = new SuggestBuilder(); if (suggestText != null) { - suggestBuilder.setText(suggestText); + suggestBuilder.setGlobalText(suggestText); } - for (SuggestionBuilder suggestion : suggestions) { - suggestBuilder.addSuggestion(suggestion); + for (Entry> suggestion : suggestions.entrySet()) { + suggestBuilder.addSuggestion(suggestion.getKey(), suggestion.getValue()); } builder.suggest(suggestBuilder); SearchResponse actionGet = builder.execute().actionGet(); @@ -1272,8 +1287,8 @@ public class SuggestSearchTests extends ESIntegTestCase { if (suggestText != null) { builder.setSuggestText(suggestText); } - for (SuggestionBuilder suggestion : suggestions) { - builder.addSuggestion(suggestion); + for (Entry> suggestion : suggestions.entrySet()) { + builder.addSuggestion(suggestion.getKey(), suggestion.getValue()); } SuggestResponse actionGet = builder.execute().actionGet(); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/TemplateQueryParserTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/TemplateQueryParserTests.java index 78c40088575..c0ad3782bf9 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/TemplateQueryParserTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/TemplateQueryParserTests.java @@ -23,8 +23,8 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.Accountable; import org.elasticsearch.Version; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Injector; @@ -153,7 +153,7 @@ public class TemplateQueryParserTests extends ESTestCase { } }); IndicesQueriesRegistry indicesQueriesRegistry = injector.getInstance(IndicesQueriesRegistry.class); - context = new QueryShardContext(idxSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService, scriptService, indicesQueriesRegistry); + context = new QueryShardContext(idxSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService, scriptService, indicesQueriesRegistry, null); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Analyzer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Analyzer.java index 33ab695d527..090667b4543 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Analyzer.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Analyzer.java @@ -19,21 +19,10 @@ package org.elasticsearch.painless; -import org.antlr.v4.runtime.ParserRuleContext; -import org.elasticsearch.painless.Definition.Cast; -import org.elasticsearch.painless.Definition.Constructor; -import 
org.elasticsearch.painless.Definition.Field; -import org.elasticsearch.painless.Definition.Method; -import org.elasticsearch.painless.Definition.Pair; -import org.elasticsearch.painless.Definition.Sort; -import org.elasticsearch.painless.Definition.Struct; -import org.elasticsearch.painless.Definition.Transform; -import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.PainlessParser.AfterthoughtContext; import org.elasticsearch.painless.PainlessParser.ArgumentsContext; import org.elasticsearch.painless.PainlessParser.AssignmentContext; import org.elasticsearch.painless.PainlessParser.BinaryContext; -import org.elasticsearch.painless.PainlessParser.BlockContext; import org.elasticsearch.painless.PainlessParser.BoolContext; import org.elasticsearch.painless.PainlessParser.BreakContext; import org.elasticsearch.painless.PainlessParser.CastContext; @@ -47,8 +36,8 @@ import org.elasticsearch.painless.PainlessParser.DecltypeContext; import org.elasticsearch.painless.PainlessParser.DeclvarContext; import org.elasticsearch.painless.PainlessParser.DoContext; import org.elasticsearch.painless.PainlessParser.EmptyContext; +import org.elasticsearch.painless.PainlessParser.EmptyscopeContext; import org.elasticsearch.painless.PainlessParser.ExprContext; -import org.elasticsearch.painless.PainlessParser.ExpressionContext; import org.elasticsearch.painless.PainlessParser.ExtbraceContext; import org.elasticsearch.painless.PainlessParser.ExtcallContext; import org.elasticsearch.painless.PainlessParser.ExtcastContext; @@ -75,7 +64,6 @@ import org.elasticsearch.painless.PainlessParser.PreincContext; import org.elasticsearch.painless.PainlessParser.ReturnContext; import org.elasticsearch.painless.PainlessParser.SingleContext; import org.elasticsearch.painless.PainlessParser.SourceContext; -import org.elasticsearch.painless.PainlessParser.StatementContext; import org.elasticsearch.painless.PainlessParser.ThrowContext; import org.elasticsearch.painless.PainlessParser.TrapContext; import org.elasticsearch.painless.PainlessParser.TrueContext; @@ -83,3077 +71,384 @@ import org.elasticsearch.painless.PainlessParser.TryContext; import org.elasticsearch.painless.PainlessParser.UnaryContext; import org.elasticsearch.painless.PainlessParser.WhileContext; -import java.util.ArrayDeque; -import java.util.Arrays; -import java.util.Deque; -import java.util.Iterator; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.painless.PainlessParser.ADD; -import static org.elasticsearch.painless.PainlessParser.BWAND; -import static org.elasticsearch.painless.PainlessParser.BWOR; -import static org.elasticsearch.painless.PainlessParser.BWXOR; -import static org.elasticsearch.painless.PainlessParser.DIV; -import static org.elasticsearch.painless.PainlessParser.LSH; -import static org.elasticsearch.painless.PainlessParser.MUL; -import static org.elasticsearch.painless.PainlessParser.REM; -import static org.elasticsearch.painless.PainlessParser.RSH; -import static org.elasticsearch.painless.PainlessParser.SUB; -import static org.elasticsearch.painless.PainlessParser.USH; - class Analyzer extends PainlessParserBaseVisitor<Void> { - private static class Variable { - final String name; - final Type type; - final int slot; - - private Variable(final String name, final Type type, final int slot) { - this.name = name; - this.type = type; - this.slot = slot; - } - } - static void analyze(final Metadata metadata) { new Analyzer(metadata); } - private final Metadata metadata; - private final
Definition definition; - private final CompilerSettings settings; - - private final Deque<Integer> scopes = new ArrayDeque<>(); - private final Deque<Variable> variables = new ArrayDeque<>(); + private final AnalyzerStatement statement; + private final AnalyzerExpression expression; + private final AnalyzerExternal external; private Analyzer(final Metadata metadata) { - this.metadata = metadata; - definition = metadata.definition; - settings = metadata.settings; + final Definition definition = metadata.definition; - incrementScope(); - addVariable(null, "#this", definition.execType); - metadata.inputValueSlot = addVariable(null, "input", definition.smapType).slot; - metadata.scoreValueSlot = addVariable(null, "_score", definition.floatType).slot; - metadata.loopCounterSlot = addVariable(null, "#loop", definition.intType).slot; + final AnalyzerUtility utility = new AnalyzerUtility(); + final AnalyzerCaster caster = new AnalyzerCaster(definition); + final AnalyzerPromoter promoter = new AnalyzerPromoter(definition); + + statement = new AnalyzerStatement(metadata, this, utility, caster); + expression = new AnalyzerExpression(metadata, this, caster, promoter); + external = new AnalyzerExternal(metadata, this, utility, caster, promoter); + + utility.incrementScope(); + utility.addVariable(null, "#this", definition.execType); + metadata.inputValueSlot = utility.addVariable(null, "input", definition.smapType).slot; + metadata.scoreValueSlot = utility.addVariable(null, "_score", definition.floatType).slot; + metadata.loopCounterSlot = utility.addVariable(null, "#loop", definition.intType).slot; metadata.createStatementMetadata(metadata.root); visit(metadata.root); - decrementScope(); - } - - void incrementScope() { - scopes.push(0); - } - - void decrementScope() { - int remove = scopes.pop(); - - while (remove > 0) { - variables.pop(); - --remove; - } - } - - Variable getVariable(final String name) { - final Iterator<Variable> itr = variables.iterator(); - - while (itr.hasNext()) { - final Variable variable = itr.next(); - - if (variable.name.equals(name)) { - return variable; - } - } - - return null; - } - - Variable addVariable(final ParserRuleContext source, final String name, final Type type) { - if (getVariable(name) != null) { - if (source == null) { - throw new IllegalArgumentException("Argument name [" + name + "] already defined within the scope."); - } else { - throw new IllegalArgumentException( - Metadata.error(source) + "Variable name [" + name + "] already defined within the scope."); - } - } - - final Variable previous = variables.peekFirst(); - int slot = 0; - - if (previous != null) { - slot += previous.slot + previous.type.type.getSize(); - } - - final Variable variable = new Variable(name, type, slot); - variables.push(variable); - - final int update = scopes.pop() + 1; - scopes.push(update); - - return variable; + utility.decrementScope(); } @Override public Void visitSource(final SourceContext ctx) { - final Metadata.StatementMetadata sourcesmd = metadata.getStatementMetadata(ctx); - final List<StatementContext> statectxs = ctx.statement(); - final StatementContext lastctx = statectxs.get(statectxs.size() - 1); - - incrementScope(); - - for (final StatementContext statectx : statectxs) { - if (sourcesmd.allLast) { - throw new IllegalArgumentException(Metadata.error(statectx) + - "Statement will never be executed because all prior paths escape."); - } - - final Metadata.StatementMetadata statesmd = metadata.createStatementMetadata(statectx); - statesmd.lastSource = statectx == lastctx; - visit(statectx); - -
sourcesmd.methodEscape = statesmd.methodEscape; - sourcesmd.allLast = statesmd.allLast; - } - - decrementScope(); + statement.processSource(ctx); return null; } @Override public Void visitIf(final IfContext ctx) { - final Metadata.StatementMetadata ifsmd = metadata.getStatementMetadata(ctx); - - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - expremd.to = definition.booleanType; - visit(exprctx); - markCast(expremd); - - if (expremd.postConst != null) { - throw new IllegalArgumentException(Metadata.error(ctx) + "If statement is not necessary."); - } - - final BlockContext blockctx0 = ctx.block(0); - final Metadata.StatementMetadata blocksmd0 = metadata.createStatementMetadata(blockctx0); - blocksmd0.lastSource = ifsmd.lastSource; - blocksmd0.inLoop = ifsmd.inLoop; - blocksmd0.lastLoop = ifsmd.lastLoop; - incrementScope(); - visit(blockctx0); - decrementScope(); - - ifsmd.anyContinue = blocksmd0.anyContinue; - ifsmd.anyBreak = blocksmd0.anyBreak; - - ifsmd.count = blocksmd0.count; - - if (ctx.ELSE() != null) { - final BlockContext blockctx1 = ctx.block(1); - final Metadata.StatementMetadata blocksmd1 = metadata.createStatementMetadata(blockctx1); - blocksmd1.lastSource = ifsmd.lastSource; - incrementScope(); - visit(blockctx1); - decrementScope(); - - ifsmd.methodEscape = blocksmd0.methodEscape && blocksmd1.methodEscape; - ifsmd.loopEscape = blocksmd0.loopEscape && blocksmd1.loopEscape; - ifsmd.allLast = blocksmd0.allLast && blocksmd1.allLast; - ifsmd.anyContinue |= blocksmd1.anyContinue; - ifsmd.anyBreak |= blocksmd1.anyBreak; - - ifsmd.count = Math.max(ifsmd.count, blocksmd1.count); - } + statement.processIf(ctx); return null; } @Override public Void visitWhile(final WhileContext ctx) { - final Metadata.StatementMetadata whilesmd = metadata.getStatementMetadata(ctx); - - incrementScope(); - - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - expremd.to = definition.booleanType; - visit(exprctx); - markCast(expremd); - - boolean continuous = false; - - if (expremd.postConst != null) { - continuous = (boolean)expremd.postConst; - - if (!continuous) { - throw new IllegalArgumentException(Metadata.error(ctx) + "The loop will never be executed."); - } - - if (ctx.empty() != null) { - throw new IllegalArgumentException(Metadata.error(ctx) + "The loop will never exit."); - } - } - - final BlockContext blockctx = ctx.block(); - - if (blockctx != null) { - final Metadata.StatementMetadata blocksmd = metadata.createStatementMetadata(blockctx); - blocksmd.beginLoop = true; - blocksmd.inLoop = true; - visit(blockctx); - - if (blocksmd.loopEscape && !blocksmd.anyContinue) { - throw new IllegalArgumentException(Metadata.error(ctx) + "All paths escape so the loop is not necessary."); - } - - if (continuous && !blocksmd.anyBreak) { - whilesmd.methodEscape = true; - whilesmd.allLast = true; - } - } - - whilesmd.count = 1; - - decrementScope(); + statement.processWhile(ctx); return null; } @Override public Void visitDo(final DoContext ctx) { - final Metadata.StatementMetadata dosmd = metadata.getStatementMetadata(ctx); - - incrementScope(); - - final BlockContext blockctx = ctx.block(); - final Metadata.StatementMetadata blocksmd = metadata.createStatementMetadata(blockctx); - blocksmd.beginLoop = true; - blocksmd.inLoop = true; - visit(blockctx); - - if 
(blocksmd.loopEscape && !blocksmd.anyContinue) { - throw new IllegalArgumentException(Metadata.error(ctx) + "All paths escape so the loop is not necessary."); - } - - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - expremd.to = definition.booleanType; - visit(exprctx); - markCast(expremd); - - if (expremd.postConst != null) { - final boolean continuous = (boolean)expremd.postConst; - - if (!continuous) { - throw new IllegalArgumentException(Metadata.error(ctx) + "All paths escape so the loop is not necessary."); - } - - if (!blocksmd.anyBreak) { - dosmd.methodEscape = true; - dosmd.allLast = true; - } - } - - dosmd.count = 1; - - decrementScope(); + statement.processDo(ctx); return null; } @Override public Void visitFor(final ForContext ctx) { - final Metadata.StatementMetadata forsmd = metadata.getStatementMetadata(ctx); - boolean continuous = false; - - incrementScope(); - - final InitializerContext initctx = ctx.initializer(); - - if (initctx != null) { - metadata.createStatementMetadata(initctx); - visit(initctx); - } - - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); - - if (exprctx != null) { - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - expremd.to = definition.booleanType; - visit(exprctx); - markCast(expremd); - - if (expremd.postConst != null) { - continuous = (boolean)expremd.postConst; - - if (!continuous) { - throw new IllegalArgumentException(Metadata.error(ctx) + "The loop will never be executed."); - } - - if (ctx.empty() != null) { - throw new IllegalArgumentException(Metadata.error(ctx) + "The loop is continuous."); - } - } - } else { - continuous = true; - } - - final AfterthoughtContext atctx = ctx.afterthought(); - - if (atctx != null) { - metadata.createStatementMetadata(atctx); - visit(atctx); - } - - final BlockContext blockctx = ctx.block(); - - if (blockctx != null) { - final Metadata.StatementMetadata blocksmd = metadata.createStatementMetadata(blockctx); - blocksmd.beginLoop = true; - blocksmd.inLoop = true; - visit(blockctx); - - if (blocksmd.loopEscape && !blocksmd.anyContinue) { - throw new IllegalArgumentException(Metadata.error(ctx) + "All paths escape so the loop is not necessary."); - } - - if (continuous && !blocksmd.anyBreak) { - forsmd.methodEscape = true; - forsmd.allLast = true; - } - } - - forsmd.count = 1; - - decrementScope(); + statement.processFor(ctx); return null; } @Override public Void visitDecl(final DeclContext ctx) { - final Metadata.StatementMetadata declsmd = metadata.getStatementMetadata(ctx); - - final DeclarationContext declctx = ctx.declaration(); - metadata.createStatementMetadata(declctx); - visit(declctx); - - declsmd.count = 1; + statement.processDecl(ctx); return null; } @Override public Void visitContinue(final ContinueContext ctx) { - final Metadata.StatementMetadata continuesmd = metadata.getStatementMetadata(ctx); - - if (!continuesmd.inLoop) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Cannot have a continue statement outside of a loop."); - } - - if (continuesmd.lastLoop) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Unnessary continue statement at the end of a loop."); - } - - continuesmd.allLast = true; - continuesmd.anyContinue = true; - - continuesmd.count = 1; + statement.processContinue(ctx); return null; } @Override public Void visitBreak(final BreakContext ctx) { - final 
Metadata.StatementMetadata breaksmd = metadata.getStatementMetadata(ctx); - - if (!breaksmd.inLoop) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Cannot have a break statement outside of a loop."); - } - - breaksmd.loopEscape = true; - breaksmd.allLast = true; - breaksmd.anyBreak = true; - - breaksmd.count = 1; + statement.processBreak(ctx); return null; } @Override public Void visitReturn(final ReturnContext ctx) { - final Metadata.StatementMetadata returnsmd = metadata.getStatementMetadata(ctx); - - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - expremd.to = definition.objectType; - visit(exprctx); - markCast(expremd); - - returnsmd.methodEscape = true; - returnsmd.loopEscape = true; - returnsmd.allLast = true; - - returnsmd.count = 1; + statement.processReturn(ctx); return null; } @Override public Void visitTry(final TryContext ctx) { - final Metadata.StatementMetadata trysmd = metadata.getStatementMetadata(ctx); - - final BlockContext blockctx = ctx.block(); - final Metadata.StatementMetadata blocksmd = metadata.createStatementMetadata(blockctx); - blocksmd.lastSource = trysmd.lastSource; - blocksmd.inLoop = trysmd.inLoop; - blocksmd.lastLoop = trysmd.lastLoop; - incrementScope(); - visit(blockctx); - decrementScope(); - - trysmd.methodEscape = blocksmd.methodEscape; - trysmd.loopEscape = blocksmd.loopEscape; - trysmd.allLast = blocksmd.allLast; - trysmd.anyContinue = blocksmd.anyContinue; - trysmd.anyBreak = blocksmd.anyBreak; - - int trapcount = 0; - - for (final TrapContext trapctx : ctx.trap()) { - final Metadata.StatementMetadata trapsmd = metadata.createStatementMetadata(trapctx); - trapsmd.lastSource = trysmd.lastSource; - trapsmd.inLoop = trysmd.inLoop; - trapsmd.lastLoop = trysmd.lastLoop; - incrementScope(); - visit(trapctx); - decrementScope(); - - trysmd.methodEscape &= trapsmd.methodEscape; - trysmd.loopEscape &= trapsmd.loopEscape; - trysmd.allLast &= trapsmd.allLast; - trysmd.anyContinue |= trapsmd.anyContinue; - trysmd.anyBreak |= trapsmd.anyBreak; - - trapcount = Math.max(trapcount, trapsmd.count); - } - - trysmd.count = blocksmd.count + trapcount; + statement.processTry(ctx); return null; } @Override public Void visitThrow(final ThrowContext ctx) { - final Metadata.StatementMetadata throwsmd = metadata.getStatementMetadata(ctx); - - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - expremd.to = definition.exceptionType; - visit(exprctx); - markCast(expremd); - - throwsmd.methodEscape = true; - throwsmd.loopEscape = true; - throwsmd.allLast = true; - - throwsmd.count = 1; + statement.processThrow(ctx); return null; } @Override public Void visitExpr(final ExprContext ctx) { - final Metadata.StatementMetadata exprsmd = metadata.getStatementMetadata(ctx); - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - expremd.read = exprsmd.lastSource; - visit(exprctx); - - if (!expremd.statement && !exprsmd.lastSource) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Not a statement."); - } - - final boolean rtn = exprsmd.lastSource && expremd.from.sort != Sort.VOID; - exprsmd.methodEscape = rtn; - exprsmd.loopEscape = rtn; - exprsmd.allLast = rtn; - expremd.to = rtn ? 
definition.objectType : expremd.from; - markCast(expremd); - - exprsmd.count = 1; + statement.processExpr(ctx); return null; } @Override public Void visitMultiple(final MultipleContext ctx) { - final Metadata.StatementMetadata multiplesmd = metadata.getStatementMetadata(ctx); - final List<StatementContext> statectxs = ctx.statement(); - final StatementContext lastctx = statectxs.get(statectxs.size() - 1); - - for (StatementContext statectx : statectxs) { - if (multiplesmd.allLast) { - throw new IllegalArgumentException(Metadata.error(statectx) + - "Statement will never be executed because all prior paths escape."); - } - - final Metadata.StatementMetadata statesmd = metadata.createStatementMetadata(statectx); - statesmd.lastSource = multiplesmd.lastSource && statectx == lastctx; - statesmd.inLoop = multiplesmd.inLoop; - statesmd.lastLoop = (multiplesmd.beginLoop || multiplesmd.lastLoop) && statectx == lastctx; - visit(statectx); - - multiplesmd.methodEscape = statesmd.methodEscape; - multiplesmd.loopEscape = statesmd.loopEscape; - multiplesmd.allLast = statesmd.allLast; - multiplesmd.anyContinue |= statesmd.anyContinue; - multiplesmd.anyBreak |= statesmd.anyBreak; - - multiplesmd.count += statesmd.count; - } + statement.processMultiple(ctx); return null; } @Override public Void visitSingle(final SingleContext ctx) { - final Metadata.StatementMetadata singlesmd = metadata.getStatementMetadata(ctx); - - final StatementContext statectx = ctx.statement(); - final Metadata.StatementMetadata statesmd = metadata.createStatementMetadata(statectx); - statesmd.lastSource = singlesmd.lastSource; - statesmd.inLoop = singlesmd.inLoop; - statesmd.lastLoop = singlesmd.beginLoop || singlesmd.lastLoop; - visit(statectx); - - singlesmd.methodEscape = statesmd.methodEscape; - singlesmd.loopEscape = statesmd.loopEscape; - singlesmd.allLast = statesmd.allLast; - singlesmd.anyContinue = statesmd.anyContinue; - singlesmd.anyBreak = statesmd.anyBreak; - - singlesmd.count = statesmd.count; + statement.processSingle(ctx); return null; } @Override public Void visitEmpty(final EmptyContext ctx) { - throw new UnsupportedOperationException(Metadata.error(ctx) + "Unexpected parser state."); + throw new UnsupportedOperationException(AnalyzerUtility.error(ctx) + "Unexpected state."); } @Override - public Void visitInitializer(InitializerContext ctx) { - final DeclarationContext declctx = ctx.declaration(); - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); + public Void visitEmptyscope(final EmptyscopeContext ctx) { + throw new UnsupportedOperationException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } - if (declctx != null) { - metadata.createStatementMetadata(declctx); - visit(declctx); - } else if (exprctx != null) { - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - expremd.read = false; - visit(exprctx); - - expremd.to = expremd.from; - markCast(expremd); - - if (!expremd.statement) { - throw new IllegalArgumentException(Metadata.error(exprctx) + - "The intializer of a for loop must be a statement."); - } - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } + @Override + public Void visitInitializer(final InitializerContext ctx) { + statement.processInitializer(ctx); return null; } @Override - public Void visitAfterthought(AfterthoughtContext ctx) { - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); - - if (exprctx != null) { - final Metadata.ExpressionMetadata expremd =
metadata.createExpressionMetadata(exprctx); - expremd.read = false; - visit(exprctx); - - expremd.to = expremd.from; - markCast(expremd); - - if (!expremd.statement) { - throw new IllegalArgumentException(Metadata.error(exprctx) + - "The afterthought of a for loop must be a statement."); - } - } + public Void visitAfterthought(final AfterthoughtContext ctx) { + statement.processAfterthought(ctx); return null; } @Override public Void visitDeclaration(final DeclarationContext ctx) { - final DecltypeContext decltypectx = ctx.decltype(); - final Metadata.ExpressionMetadata decltypeemd = metadata.createExpressionMetadata(decltypectx); - visit(decltypectx); - - for (final DeclvarContext declvarctx : ctx.declvar()) { - final Metadata.ExpressionMetadata declvaremd = metadata.createExpressionMetadata(declvarctx); - declvaremd.to = decltypeemd.from; - visit(declvarctx); - } + statement.processDeclaration(ctx); return null; } @Override public Void visitDecltype(final DecltypeContext ctx) { - final Metadata.ExpressionMetadata decltypeemd = metadata.getExpressionMetadata(ctx); - - final String name = ctx.getText(); - decltypeemd.from = definition.getType(name); + statement.processDecltype(ctx); return null; } @Override public Void visitDeclvar(final DeclvarContext ctx) { - final Metadata.ExpressionMetadata declvaremd = metadata.getExpressionMetadata(ctx); - - final String name = ctx.ID().getText(); - declvaremd.postConst = addVariable(ctx, name, declvaremd.to).slot; - - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); - - if (exprctx != null) { - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - expremd.to = declvaremd.to; - visit(exprctx); - markCast(expremd); - } + statement.processDeclvar(ctx); return null; } @Override public Void visitTrap(final TrapContext ctx) { - final Metadata.StatementMetadata trapsmd = metadata.getStatementMetadata(ctx); - - final String type = ctx.TYPE().getText(); - trapsmd.exception = definition.getType(type); - - try { - trapsmd.exception.clazz.asSubclass(Exception.class); - } catch (final ClassCastException exception) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Invalid exception type [" + trapsmd.exception.name + "]."); - } - - final String id = ctx.ID().getText(); - trapsmd.slot = addVariable(ctx, id, trapsmd.exception).slot; - - final BlockContext blockctx = ctx.block(); - - if (blockctx != null) { - final Metadata.StatementMetadata blocksmd = metadata.createStatementMetadata(blockctx); - blocksmd.lastSource = trapsmd.lastSource; - blocksmd.inLoop = trapsmd.inLoop; - blocksmd.lastLoop = trapsmd.lastLoop; - visit(blockctx); - - trapsmd.methodEscape = blocksmd.methodEscape; - trapsmd.loopEscape = blocksmd.loopEscape; - trapsmd.allLast = blocksmd.allLast; - trapsmd.anyContinue = blocksmd.anyContinue; - trapsmd.anyBreak = blocksmd.anyBreak; - } else if (ctx.emptyscope() == null) { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } + statement.processTrap(ctx); return null; } @Override public Void visitPrecedence(final PrecedenceContext ctx) { - throw new UnsupportedOperationException(Metadata.error(ctx) + "Unexpected parser state."); + throw new UnsupportedOperationException(AnalyzerUtility.error(ctx) + "Unexpected state."); } @Override public Void visitNumeric(final NumericContext ctx) { - final Metadata.ExpressionMetadata numericemd = metadata.getExpressionMetadata(ctx); - final boolean negate = ctx.parent instanceof UnaryContext && 
((UnaryContext)ctx.parent).SUB() != null; - - if (ctx.DECIMAL() != null) { - final String svalue = (negate ? "-" : "") + ctx.DECIMAL().getText(); - - if (svalue.endsWith("f") || svalue.endsWith("F")) { - try { - numericemd.from = definition.floatType; - numericemd.preConst = Float.parseFloat(svalue.substring(0, svalue.length() - 1)); - } catch (NumberFormatException exception) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Invalid float constant [" + svalue + "]."); - } - } else { - try { - numericemd.from = definition.doubleType; - numericemd.preConst = Double.parseDouble(svalue); - } catch (NumberFormatException exception) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Invalid double constant [" + svalue + "]."); - } - } - } else { - String svalue = negate ? "-" : ""; - int radix; - - if (ctx.OCTAL() != null) { - svalue += ctx.OCTAL().getText(); - radix = 8; - } else if (ctx.INTEGER() != null) { - svalue += ctx.INTEGER().getText(); - radix = 10; - } else if (ctx.HEX() != null) { - svalue += ctx.HEX().getText(); - radix = 16; - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - - if (svalue.endsWith("d") || svalue.endsWith("D")) { - try { - numericemd.from = definition.doubleType; - numericemd.preConst = Double.parseDouble(svalue.substring(0, svalue.length() - 1)); - } catch (NumberFormatException exception) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Invalid float constant [" + svalue + "]."); - } - } else if (svalue.endsWith("f") || svalue.endsWith("F")) { - try { - numericemd.from = definition.floatType; - numericemd.preConst = Float.parseFloat(svalue.substring(0, svalue.length() - 1)); - } catch (NumberFormatException exception) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Invalid float constant [" + svalue + "]."); - } - } else if (svalue.endsWith("l") || svalue.endsWith("L")) { - try { - numericemd.from = definition.longType; - numericemd.preConst = Long.parseLong(svalue.substring(0, svalue.length() - 1), radix); - } catch (NumberFormatException exception) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Invalid long constant [" + svalue + "]."); - } - } else { - try { - final Type type = numericemd.to; - final Sort sort = type == null ? 
Sort.INT : type.sort; - final int value = Integer.parseInt(svalue, radix); - - if (sort == Sort.BYTE && value >= Byte.MIN_VALUE && value <= Byte.MAX_VALUE) { - numericemd.from = definition.byteType; - numericemd.preConst = (byte)value; - } else if (sort == Sort.CHAR && value >= Character.MIN_VALUE && value <= Character.MAX_VALUE) { - numericemd.from = definition.charType; - numericemd.preConst = (char)value; - } else if (sort == Sort.SHORT && value >= Short.MIN_VALUE && value <= Short.MAX_VALUE) { - numericemd.from = definition.shortType; - numericemd.preConst = (short)value; - } else { - numericemd.from = definition.intType; - numericemd.preConst = value; - } - } catch (NumberFormatException exception) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Invalid int constant [" + svalue + "]."); - } - } - } + expression.processNumeric(ctx); return null; } @Override public Void visitChar(final CharContext ctx) { - final Metadata.ExpressionMetadata charemd = metadata.getExpressionMetadata(ctx); - - if (ctx.CHAR() == null) { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - - charemd.preConst = ctx.CHAR().getText().charAt(0); - charemd.from = definition.charType; + expression.processChar(ctx); return null; } @Override public Void visitTrue(final TrueContext ctx) { - final Metadata.ExpressionMetadata trueemd = metadata.getExpressionMetadata(ctx); - - if (ctx.TRUE() == null) { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - - trueemd.preConst = true; - trueemd.from = definition.booleanType; + expression.processTrue(ctx); return null; } @Override public Void visitFalse(final FalseContext ctx) { - final Metadata.ExpressionMetadata falseemd = metadata.getExpressionMetadata(ctx); - - if (ctx.FALSE() == null) { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - - falseemd.preConst = false; - falseemd.from = definition.booleanType; + expression.processFalse(ctx); return null; } @Override public Void visitNull(final NullContext ctx) { - final Metadata.ExpressionMetadata nullemd = metadata.getExpressionMetadata(ctx); - - if (ctx.NULL() == null) { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - - nullemd.isNull = true; - - if (nullemd.to != null) { - if (nullemd.to.sort.primitive) { - throw new IllegalArgumentException("Cannot cast null to a primitive type [" + nullemd.to.name + "]."); - } - - nullemd.from = nullemd.to; - } else { - nullemd.from = definition.objectType; - } + expression.processNull(ctx); return null; } @Override public Void visitExternal(final ExternalContext ctx) { - final Metadata.ExpressionMetadata extemd = metadata.getExpressionMetadata(ctx); - - final ExtstartContext extstartctx = ctx.extstart(); - final Metadata.ExternalMetadata extstartemd = metadata.createExternalMetadata(extstartctx); - extstartemd.read = extemd.read; - visit(extstartctx); - - extemd.statement = extstartemd.statement; - extemd.preConst = extstartemd.constant; - extemd.from = extstartemd.current; - extemd.typesafe = extstartemd.current.sort != Sort.DEF; + expression.processExternal(ctx); return null; } @Override public Void visitPostinc(final PostincContext ctx) { - final Metadata.ExpressionMetadata postincemd = metadata.getExpressionMetadata(ctx); - - final ExtstartContext extstartctx = ctx.extstart(); - final Metadata.ExternalMetadata extstartemd = metadata.createExternalMetadata(extstartctx); - extstartemd.read = postincemd.read; - 
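
Note the uniform shape of this refactor: each visit method drops its inline Metadata bookkeeping and becomes a one-line dispatch to a per-concern helper (the statement, expression, and external fields used above). A minimal sketch of that shape follows; every name in it is a stand-in, since the diff shows only the helper fields and AnalyzerUtility, not the helper class declarations themselves.

    // Minimal sketch of the delegation pattern this diff applies; all names
    // here stand in for the generated ANTLR contexts and the real helpers.
    interface NumericContext {}                    // stand-in parser context

    class AnalyzerExpression {                     // assumed helper class name
        void processNumeric(final NumericContext ctx) {
            // constant parsing and metadata updates now live here
        }
    }

    class Analyzer {
        private final AnalyzerExpression expression;

        Analyzer(final AnalyzerExpression expression) {
            this.expression = expression;
        }

        Void visitNumeric(final NumericContext ctx) {
            expression.processNumeric(ctx);        // visitor reduced to dispatch
            return null;
        }
    }

The payoff is that the visitor keeps only tree-walking order, while type inference, constant folding, and escape analysis each land in a class small enough to review on its own.
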
extstartemd.storeExpr = ctx.increment(); - extstartemd.token = ADD; - extstartemd.post = true; - visit(extstartctx); - - postincemd.statement = true; - postincemd.from = extstartemd.read ? extstartemd.current : definition.voidType; - postincemd.typesafe = extstartemd.current.sort != Sort.DEF; + expression.processPostinc(ctx); return null; } @Override public Void visitPreinc(final PreincContext ctx) { - final Metadata.ExpressionMetadata preincemd = metadata.getExpressionMetadata(ctx); - - final ExtstartContext extstartctx = ctx.extstart(); - final Metadata.ExternalMetadata extstartemd = metadata.createExternalMetadata(extstartctx); - extstartemd.read = preincemd.read; - extstartemd.storeExpr = ctx.increment(); - extstartemd.token = ADD; - extstartemd.pre = true; - visit(extstartctx); - - preincemd.statement = true; - preincemd.from = extstartemd.read ? extstartemd.current : definition.voidType; - preincemd.typesafe = extstartemd.current.sort != Sort.DEF; + expression.processPreinc(ctx); return null; } @Override public Void visitUnary(final UnaryContext ctx) { - final Metadata.ExpressionMetadata unaryemd = metadata.getExpressionMetadata(ctx); - - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - - if (ctx.BOOLNOT() != null) { - expremd.to = definition.booleanType; - visit(exprctx); - markCast(expremd); - - if (expremd.postConst != null) { - unaryemd.preConst = !(boolean)expremd.postConst; - } - - unaryemd.from = definition.booleanType; - } else if (ctx.BWNOT() != null || ctx.ADD() != null || ctx.SUB() != null) { - visit(exprctx); - - final Type promote = promoteNumeric(expremd.from, ctx.BWNOT() == null, true); - - if (promote == null) { - throw new ClassCastException("Cannot apply [" + ctx.getChild(0).getText() + "] " + - "operation to type [" + expremd.from.name + "]."); - } - - expremd.to = promote; - markCast(expremd); - - if (expremd.postConst != null) { - final Sort sort = promote.sort; - - if (ctx.BWNOT() != null) { - if (sort == Sort.INT) { - unaryemd.preConst = ~(int)expremd.postConst; - } else if (sort == Sort.LONG) { - unaryemd.preConst = ~(long)expremd.postConst; - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } else if (ctx.SUB() != null) { - if (exprctx instanceof NumericContext) { - unaryemd.preConst = expremd.postConst; - } else { - if (sort == Sort.INT) { - if (settings.getNumericOverflow()) { - unaryemd.preConst = -(int)expremd.postConst; - } else { - unaryemd.preConst = Math.negateExact((int)expremd.postConst); - } - } else if (sort == Sort.LONG) { - if (settings.getNumericOverflow()) { - unaryemd.preConst = -(long)expremd.postConst; - } else { - unaryemd.preConst = Math.negateExact((long)expremd.postConst); - } - } else if (sort == Sort.FLOAT) { - unaryemd.preConst = -(float)expremd.postConst; - } else if (sort == Sort.DOUBLE) { - unaryemd.preConst = -(double)expremd.postConst; - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } - } else if (ctx.ADD() != null) { - if (sort == Sort.INT) { - unaryemd.preConst = +(int)expremd.postConst; - } else if (sort == Sort.LONG) { - unaryemd.preConst = +(long)expremd.postConst; - } else if (sort == Sort.FLOAT) { - unaryemd.preConst = +(float)expremd.postConst; - } else if (sort == Sort.DOUBLE) { - unaryemd.preConst = +(double)expremd.postConst; - } else { - throw new IllegalStateException(Metadata.error(ctx) + 
"Unexpected parser state."); - } - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } - - unaryemd.from = promote; - unaryemd.typesafe = expremd.typesafe; - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } + expression.processUnary(ctx); return null; } @Override public Void visitCast(final CastContext ctx) { - final Metadata.ExpressionMetadata castemd = metadata.getExpressionMetadata(ctx); - - final DecltypeContext decltypectx = ctx.decltype(); - final Metadata.ExpressionMetadata decltypemd = metadata.createExpressionMetadata(decltypectx); - visit(decltypectx); - - final Type type = decltypemd.from; - castemd.from = type; - - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - expremd.to = type; - expremd.explicit = true; - visit(exprctx); - markCast(expremd); - - if (expremd.postConst != null) { - castemd.preConst = expremd.postConst; - } - - castemd.typesafe = expremd.typesafe && castemd.from.sort != Sort.DEF; + expression.processCast(ctx); return null; } @Override public Void visitBinary(final BinaryContext ctx) { - final Metadata.ExpressionMetadata binaryemd = metadata.getExpressionMetadata(ctx); - - final ExpressionContext exprctx0 = metadata.updateExpressionTree(ctx.expression(0)); - final Metadata.ExpressionMetadata expremd0 = metadata.createExpressionMetadata(exprctx0); - visit(exprctx0); - - final ExpressionContext exprctx1 = metadata.updateExpressionTree(ctx.expression(1)); - final Metadata.ExpressionMetadata expremd1 = metadata.createExpressionMetadata(exprctx1); - visit(exprctx1); - - final boolean decimal = ctx.MUL() != null || ctx.DIV() != null || ctx.REM() != null || ctx.SUB() != null; - final boolean add = ctx.ADD() != null; - final boolean xor = ctx.BWXOR() != null; - final Type promote = add ? promoteAdd(expremd0.from, expremd1.from) : - xor ? promoteXor(expremd0.from, expremd1.from) : - promoteNumeric(expremd0.from, expremd1.from, decimal, true); - - if (promote == null) { - throw new ClassCastException("Cannot apply [" + ctx.getChild(1).getText() + "] " + - "operation to types [" + expremd0.from.name + "] and [" + expremd1.from.name + "]."); - } - - final Sort sort = promote.sort; - expremd0.to = add && sort == Sort.STRING ? expremd0.from : promote; - expremd1.to = add && sort == Sort.STRING ? 
expremd1.from : promote; - markCast(expremd0); - markCast(expremd1); - - if (expremd0.postConst != null && expremd1.postConst != null) { - if (ctx.MUL() != null) { - if (sort == Sort.INT) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (int)expremd0.postConst * (int)expremd1.postConst; - } else { - binaryemd.preConst = Math.multiplyExact((int)expremd0.postConst, (int)expremd1.postConst); - } - } else if (sort == Sort.LONG) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (long)expremd0.postConst * (long)expremd1.postConst; - } else { - binaryemd.preConst = Math.multiplyExact((long)expremd0.postConst, (long)expremd1.postConst); - } - } else if (sort == Sort.FLOAT) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (float)expremd0.postConst * (float)expremd1.postConst; - } else { - binaryemd.preConst = Utility.multiplyWithoutOverflow((float)expremd0.postConst, (float)expremd1.postConst); - } - } else if (sort == Sort.DOUBLE) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (double)expremd0.postConst * (double)expremd1.postConst; - } else { - binaryemd.preConst = Utility.multiplyWithoutOverflow((double)expremd0.postConst, (double)expremd1.postConst); - } - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } else if (ctx.DIV() != null) { - if (sort == Sort.INT) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (int)expremd0.postConst / (int)expremd1.postConst; - } else { - binaryemd.preConst = Utility.divideWithoutOverflow((int)expremd0.postConst, (int)expremd1.postConst); - } - } else if (sort == Sort.LONG) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (long)expremd0.postConst / (long)expremd1.postConst; - } else { - binaryemd.preConst = Utility.divideWithoutOverflow((long)expremd0.postConst, (long)expremd1.postConst); - } - } else if (sort == Sort.FLOAT) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (float)expremd0.postConst / (float)expremd1.postConst; - } else { - binaryemd.preConst = Utility.divideWithoutOverflow((float)expremd0.postConst, (float)expremd1.postConst); - } - } else if (sort == Sort.DOUBLE) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (double)expremd0.postConst / (double)expremd1.postConst; - } else { - binaryemd.preConst = Utility.divideWithoutOverflow((double)expremd0.postConst, (double)expremd1.postConst); - } - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } else if (ctx.REM() != null) { - if (sort == Sort.INT) { - binaryemd.preConst = (int)expremd0.postConst % (int)expremd1.postConst; - } else if (sort == Sort.LONG) { - binaryemd.preConst = (long)expremd0.postConst % (long)expremd1.postConst; - } else if (sort == Sort.FLOAT) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (float)expremd0.postConst % (float)expremd1.postConst; - } else { - binaryemd.preConst = Utility.remainderWithoutOverflow((float)expremd0.postConst, (float)expremd1.postConst); - } - } else if (sort == Sort.DOUBLE) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (double)expremd0.postConst % (double)expremd1.postConst; - } else { - binaryemd.preConst = Utility.remainderWithoutOverflow((double)expremd0.postConst, (double)expremd1.postConst); - } - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } else if (ctx.ADD() != null) { - if (sort == Sort.INT) { - if (settings.getNumericOverflow()) { - 
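
The folding logic being removed here deliberately mirrors runtime semantics: with settings.getNumericOverflow() enabled it folds with the raw Java operator, otherwise it folds through checked arithmetic so a constant expression overflows at compile time exactly where the script would at run time. A sketch of that toggle for int; Math.multiplyExact and Math.addExact are the real JDK methods, while the wrapper names are illustrative only.

    // Overflow-sensitive constant folding, as the removed branches do per sort.
    // foldMul/foldAdd are illustrative names, not from the diff.
    final class ConstFold {
        static int foldMul(final int left, final int right, final boolean overflowAllowed) {
            // Raw '*' wraps like the JVM's imul; multiplyExact throws
            // ArithmeticException, matching the checked runtime path.
            return overflowAllowed ? left * right : Math.multiplyExact(left, right);
        }

        static int foldAdd(final int left, final int right, final boolean overflowAllowed) {
            return overflowAllowed ? left + right : Math.addExact(left, right);
        }
    }

float and double have no exact variants in the JDK, which is why those sorts route through the Utility.*WithoutOverflow helpers instead.
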
binaryemd.preConst = (int)expremd0.postConst + (int)expremd1.postConst; - } else { - binaryemd.preConst = Math.addExact((int)expremd0.postConst, (int)expremd1.postConst); - } - } else if (sort == Sort.LONG) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (long)expremd0.postConst + (long)expremd1.postConst; - } else { - binaryemd.preConst = Math.addExact((long)expremd0.postConst, (long)expremd1.postConst); - } - } else if (sort == Sort.FLOAT) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (float)expremd0.postConst + (float)expremd1.postConst; - } else { - binaryemd.preConst = Utility.addWithoutOverflow((float)expremd0.postConst, (float)expremd1.postConst); - } - } else if (sort == Sort.DOUBLE) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (double)expremd0.postConst + (double)expremd1.postConst; - } else { - binaryemd.preConst = Utility.addWithoutOverflow((double)expremd0.postConst, (double)expremd1.postConst); - } - } else if (sort == Sort.STRING) { - binaryemd.preConst = "" + expremd0.postConst + expremd1.postConst; - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } else if (ctx.SUB() != null) { - if (sort == Sort.INT) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (int)expremd0.postConst - (int)expremd1.postConst; - } else { - binaryemd.preConst = Math.subtractExact((int)expremd0.postConst, (int)expremd1.postConst); - } - } else if (sort == Sort.LONG) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (long)expremd0.postConst - (long)expremd1.postConst; - } else { - binaryemd.preConst = Math.subtractExact((long)expremd0.postConst, (long)expremd1.postConst); - } - } else if (sort == Sort.FLOAT) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (float)expremd0.postConst - (float)expremd1.postConst; - } else { - binaryemd.preConst = Utility.subtractWithoutOverflow((float)expremd0.postConst, (float)expremd1.postConst); - } - } else if (sort == Sort.DOUBLE) { - if (settings.getNumericOverflow()) { - binaryemd.preConst = (double)expremd0.postConst - (double)expremd1.postConst; - } else { - binaryemd.preConst = Utility.subtractWithoutOverflow((double)expremd0.postConst, (double)expremd1.postConst); - } - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } else if (ctx.LSH() != null) { - if (sort == Sort.INT) { - binaryemd.preConst = (int)expremd0.postConst << (int)expremd1.postConst; - } else if (sort == Sort.LONG) { - binaryemd.preConst = (long)expremd0.postConst << (long)expremd1.postConst; - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } else if (ctx.RSH() != null) { - if (sort == Sort.INT) { - binaryemd.preConst = (int)expremd0.postConst >> (int)expremd1.postConst; - } else if (sort == Sort.LONG) { - binaryemd.preConst = (long)expremd0.postConst >> (long)expremd1.postConst; - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } else if (ctx.USH() != null) { - if (sort == Sort.INT) { - binaryemd.preConst = (int)expremd0.postConst >>> (int)expremd1.postConst; - } else if (sort == Sort.LONG) { - binaryemd.preConst = (long)expremd0.postConst >>> (long)expremd1.postConst; - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } else if (ctx.BWAND() != null) { - if (sort == Sort.INT) { - binaryemd.preConst = (int)expremd0.postConst & 
(int)expremd1.postConst; - } else if (sort == Sort.LONG) { - binaryemd.preConst = (long)expremd0.postConst & (long)expremd1.postConst; - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } else if (ctx.BWXOR() != null) { - if (sort == Sort.BOOL) { - binaryemd.preConst = (boolean)expremd0.postConst ^ (boolean)expremd1.postConst; - } else if (sort == Sort.INT) { - binaryemd.preConst = (int)expremd0.postConst ^ (int)expremd1.postConst; - } else if (sort == Sort.LONG) { - binaryemd.preConst = (long)expremd0.postConst ^ (long)expremd1.postConst; - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } else if (ctx.BWOR() != null) { - if (sort == Sort.INT) { - binaryemd.preConst = (int)expremd0.postConst | (int)expremd1.postConst; - } else if (sort == Sort.LONG) { - binaryemd.preConst = (long)expremd0.postConst | (long)expremd1.postConst; - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } - - binaryemd.from = promote; - binaryemd.typesafe = expremd0.typesafe && expremd1.typesafe; + expression.processBinary(ctx); return null; } @Override public Void visitComp(final CompContext ctx) { - final Metadata.ExpressionMetadata compemd = metadata.getExpressionMetadata(ctx); - final boolean equality = ctx.EQ() != null || ctx.NE() != null; - final boolean reference = ctx.EQR() != null || ctx.NER() != null; - - final ExpressionContext exprctx0 = metadata.updateExpressionTree(ctx.expression(0)); - final Metadata.ExpressionMetadata expremd0 = metadata.createExpressionMetadata(exprctx0); - visit(exprctx0); - - final ExpressionContext exprctx1 = metadata.updateExpressionTree(ctx.expression(1)); - final Metadata.ExpressionMetadata expremd1 = metadata.createExpressionMetadata(exprctx1); - visit(exprctx1); - - if (expremd0.isNull && expremd1.isNull) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Unnecessary comparison of null constants."); - } - - final Type promote = equality ? promoteEquality(expremd0.from, expremd1.from) : - reference ? 
promoteReference(expremd0.from, expremd1.from) : - promoteNumeric(expremd0.from, expremd1.from, true, true); - - if (promote == null) { - throw new ClassCastException("Cannot apply [" + ctx.getChild(1).getText() + "] " + - "operation to types [" + expremd0.from.name + "] and [" + expremd1.from.name + "]."); - } - - expremd0.to = promote; - expremd1.to = promote; - markCast(expremd0); - markCast(expremd1); - - if (expremd0.postConst != null && expremd1.postConst != null) { - final Sort sort = promote.sort; - - if (ctx.EQ() != null || ctx.EQR() != null) { - if (sort == Sort.BOOL) { - compemd.preConst = (boolean)expremd0.postConst == (boolean)expremd1.postConst; - } else if (sort == Sort.INT) { - compemd.preConst = (int)expremd0.postConst == (int)expremd1.postConst; - } else if (sort == Sort.LONG) { - compemd.preConst = (long)expremd0.postConst == (long)expremd1.postConst; - } else if (sort == Sort.FLOAT) { - compemd.preConst = (float)expremd0.postConst == (float)expremd1.postConst; - } else if (sort == Sort.DOUBLE) { - compemd.preConst = (double)expremd0.postConst == (double)expremd1.postConst; - } else { - if (ctx.EQ() != null && !expremd0.isNull && !expremd1.isNull) { - compemd.preConst = expremd0.postConst.equals(expremd1.postConst); - } else if (ctx.EQR() != null) { - compemd.preConst = expremd0.postConst == expremd1.postConst; - } - } - } else if (ctx.NE() != null || ctx.NER() != null) { - if (sort == Sort.BOOL) { - compemd.preConst = (boolean)expremd0.postConst != (boolean)expremd1.postConst; - } else if (sort == Sort.INT) { - compemd.preConst = (int)expremd0.postConst != (int)expremd1.postConst; - } else if (sort == Sort.LONG) { - compemd.preConst = (long)expremd0.postConst != (long)expremd1.postConst; - } else if (sort == Sort.FLOAT) { - compemd.preConst = (float)expremd0.postConst != (float)expremd1.postConst; - } else if (sort == Sort.DOUBLE) { - compemd.preConst = (double)expremd0.postConst != (double)expremd1.postConst; - } else { - if (ctx.NE() != null && !expremd0.isNull && !expremd1.isNull) { - compemd.preConst = expremd0.postConst.equals(expremd1.postConst); - } else if (ctx.NER() != null) { - compemd.preConst = expremd0.postConst == expremd1.postConst; - } - } - } else if (ctx.GTE() != null) { - if (sort == Sort.INT) { - compemd.preConst = (int)expremd0.postConst >= (int)expremd1.postConst; - } else if (sort == Sort.LONG) { - compemd.preConst = (long)expremd0.postConst >= (long)expremd1.postConst; - } else if (sort == Sort.FLOAT) { - compemd.preConst = (float)expremd0.postConst >= (float)expremd1.postConst; - } else if (sort == Sort.DOUBLE) { - compemd.preConst = (double)expremd0.postConst >= (double)expremd1.postConst; - } - } else if (ctx.GT() != null) { - if (sort == Sort.INT) { - compemd.preConst = (int)expremd0.postConst > (int)expremd1.postConst; - } else if (sort == Sort.LONG) { - compemd.preConst = (long)expremd0.postConst > (long)expremd1.postConst; - } else if (sort == Sort.FLOAT) { - compemd.preConst = (float)expremd0.postConst > (float)expremd1.postConst; - } else if (sort == Sort.DOUBLE) { - compemd.preConst = (double)expremd0.postConst > (double)expremd1.postConst; - } - } else if (ctx.LTE() != null) { - if (sort == Sort.INT) { - compemd.preConst = (int)expremd0.postConst <= (int)expremd1.postConst; - } else if (sort == Sort.LONG) { - compemd.preConst = (long)expremd0.postConst <= (long)expremd1.postConst; - } else if (sort == Sort.FLOAT) { - compemd.preConst = (float)expremd0.postConst <= (float)expremd1.postConst; - } else if (sort == Sort.DOUBLE) { - 
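
Two details in the comparison folding above deserve attention: EQ folds object constants by value (equals) while EQR folds by reference identity (==), and the NE/NER object fallbacks appear to fold without negating the result, which looks inverted; the move into processComp is a good moment to double-check that. The value-versus-reference split, sketched with stand-in names:

    // EQ ('==' in the script language) vs EQR ('===') on folded constants.
    // foldEq is an illustrative name; null operands are guarded by the
    // surrounding isNull checks in the real code before this point.
    final class EqFold {
        static boolean foldEq(final Object left, final Object right, final boolean reference) {
            if (reference) {
                return left == right;    // EQR: Java reference identity
            }
            return left.equals(right);   // EQ: value equality
        }
    }
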
compemd.preConst = (double)expremd0.postConst <= (double)expremd1.postConst; - } - } else if (ctx.LT() != null) { - if (sort == Sort.INT) { - compemd.preConst = (int)expremd0.postConst < (int)expremd1.postConst; - } else if (sort == Sort.LONG) { - compemd.preConst = (long)expremd0.postConst < (long)expremd1.postConst; - } else if (sort == Sort.FLOAT) { - compemd.preConst = (float)expremd0.postConst < (float)expremd1.postConst; - } else if (sort == Sort.DOUBLE) { - compemd.preConst = (double)expremd0.postConst < (double)expremd1.postConst; - } - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } - - compemd.from = definition.booleanType; - compemd.typesafe = expremd0.typesafe && expremd1.typesafe; + expression.processComp(ctx); return null; } @Override public Void visitBool(final BoolContext ctx) { - final Metadata.ExpressionMetadata boolemd = metadata.getExpressionMetadata(ctx); - - final ExpressionContext exprctx0 = metadata.updateExpressionTree(ctx.expression(0)); - final Metadata.ExpressionMetadata expremd0 = metadata.createExpressionMetadata(exprctx0); - expremd0.to = definition.booleanType; - visit(exprctx0); - markCast(expremd0); - - final ExpressionContext exprctx1 = metadata.updateExpressionTree(ctx.expression(1)); - final Metadata.ExpressionMetadata expremd1 = metadata.createExpressionMetadata(exprctx1); - expremd1.to = definition.booleanType; - visit(exprctx1); - markCast(expremd1); - - if (expremd0.postConst != null && expremd1.postConst != null) { - if (ctx.BOOLAND() != null) { - boolemd.preConst = (boolean)expremd0.postConst && (boolean)expremd1.postConst; - } else if (ctx.BOOLOR() != null) { - boolemd.preConst = (boolean)expremd0.postConst || (boolean)expremd1.postConst; - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } - - boolemd.from = definition.booleanType; - boolemd.typesafe = expremd0.typesafe && expremd1.typesafe; + expression.processBool(ctx); return null; } @Override public Void visitConditional(final ConditionalContext ctx) { - final Metadata.ExpressionMetadata condemd = metadata.getExpressionMetadata(ctx); - - final ExpressionContext exprctx0 = metadata.updateExpressionTree(ctx.expression(0)); - final Metadata.ExpressionMetadata expremd0 = metadata.createExpressionMetadata(exprctx0); - expremd0.to = definition.booleanType; - visit(exprctx0); - markCast(expremd0); - - if (expremd0.postConst != null) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Unnecessary conditional statement."); - } - - final ExpressionContext exprctx1 = metadata.updateExpressionTree(ctx.expression(1)); - final Metadata.ExpressionMetadata expremd1 = metadata.createExpressionMetadata(exprctx1); - expremd1.to = condemd.to; - expremd1.explicit = condemd.explicit; - visit(exprctx1); - - final ExpressionContext exprctx2 = metadata.updateExpressionTree(ctx.expression(2)); - final Metadata.ExpressionMetadata expremd2 = metadata.createExpressionMetadata(exprctx2); - expremd2.to = condemd.to; - expremd2.explicit = condemd.explicit; - visit(exprctx2); - - if (condemd.to == null) { - final Type promote = promoteConditional(expremd1.from, expremd2.from, expremd1.preConst, expremd2.preConst); - - expremd1.to = promote; - expremd2.to = promote; - condemd.from = promote; - } else { - condemd.from = condemd.to; - } - - markCast(expremd1); - markCast(expremd2); - - condemd.typesafe = expremd0.typesafe && expremd1.typesafe; + expression.processConditional(ctx); return null; } @Override public 
Void visitAssignment(final AssignmentContext ctx) { - final Metadata.ExpressionMetadata assignemd = metadata.getExpressionMetadata(ctx); - - final ExtstartContext extstartctx = ctx.extstart(); - final Metadata.ExternalMetadata extstartemd = metadata.createExternalMetadata(extstartctx); - - extstartemd.read = assignemd.read; - extstartemd.storeExpr = metadata.updateExpressionTree(ctx.expression()); - - if (ctx.AMUL() != null) { - extstartemd.token = MUL; - } else if (ctx.ADIV() != null) { - extstartemd.token = DIV; - } else if (ctx.AREM() != null) { - extstartemd.token = REM; - } else if (ctx.AADD() != null) { - extstartemd.token = ADD; - } else if (ctx.ASUB() != null) { - extstartemd.token = SUB; - } else if (ctx.ALSH() != null) { - extstartemd.token = LSH; - } else if (ctx.AUSH() != null) { - extstartemd.token = USH; - } else if (ctx.ARSH() != null) { - extstartemd.token = RSH; - } else if (ctx.AAND() != null) { - extstartemd.token = BWAND; - } else if (ctx.AXOR() != null) { - extstartemd.token = BWXOR; - } else if (ctx.AOR() != null) { - extstartemd.token = BWOR; - } - - visit(extstartctx); - - assignemd.statement = true; - assignemd.from = extstartemd.read ? extstartemd.current : definition.voidType; - assignemd.typesafe = extstartemd.current.sort != Sort.DEF; + expression.processAssignment(ctx); return null; } @Override public Void visitExtstart(final ExtstartContext ctx) { - final ExtprecContext precctx = ctx.extprec(); - final ExtcastContext castctx = ctx.extcast(); - final ExttypeContext typectx = ctx.exttype(); - final ExtvarContext varctx = ctx.extvar(); - final ExtnewContext newctx = ctx.extnew(); - final ExtstringContext stringctx = ctx.extstring(); - - if (precctx != null) { - metadata.createExtNodeMetadata(ctx, precctx); - visit(precctx); - } else if (castctx != null) { - metadata.createExtNodeMetadata(ctx, castctx); - visit(castctx); - } else if (typectx != null) { - metadata.createExtNodeMetadata(ctx, typectx); - visit(typectx); - } else if (varctx != null) { - metadata.createExtNodeMetadata(ctx, varctx); - visit(varctx); - } else if (newctx != null) { - metadata.createExtNodeMetadata(ctx, newctx); - visit(newctx); - } else if (stringctx != null) { - metadata.createExtNodeMetadata(ctx, stringctx); - visit(stringctx); - } else { - throw new IllegalStateException(); - } + external.processExtstart(ctx); return null; } @Override public Void visitExtprec(final ExtprecContext ctx) { - final Metadata.ExtNodeMetadata precenmd = metadata.getExtNodeMetadata(ctx); - final ParserRuleContext parent = precenmd.parent; - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(parent); - - final ExtprecContext precctx = ctx.extprec(); - final ExtcastContext castctx = ctx.extcast(); - final ExttypeContext typectx = ctx.exttype(); - final ExtvarContext varctx = ctx.extvar(); - final ExtnewContext newctx = ctx.extnew(); - final ExtstringContext stringctx = ctx.extstring(); - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - if (dotctx != null || bracectx != null) { - ++parentemd.scope; - } - - if (precctx != null) { - metadata.createExtNodeMetadata(parent, precctx); - visit(precctx); - } else if (castctx != null) { - metadata.createExtNodeMetadata(parent, castctx); - visit(castctx); - } else if (typectx != null) { - metadata.createExtNodeMetadata(parent, typectx); - visit(typectx); - } else if (varctx != null) { - metadata.createExtNodeMetadata(parent, varctx); - visit(varctx); - } else if (newctx != null) { - 
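
visitAssignment, removed above, lowers every compound assignment to its plain binary operator token before handing the chain to the external analyzer; only the token changes, and the right-hand side is stashed as the store expression. The mapping, sketched with illustrative token constants (the real ones come from the generated lexer):

    final class CompoundOps {
        // Illustrative stand-ins for the lexer's operator tokens.
        static final int MUL = 1, DIV = 2, REM = 3, ADD = 4, SUB = 5, LSH = 6,
                         RSH = 7, USH = 8, BWAND = 9, BWXOR = 10, BWOR = 11;

        static int tokenFor(final String op) {
            switch (op) {
                case "*=":   return MUL;
                case "/=":   return DIV;
                case "%=":   return REM;
                case "+=":   return ADD;
                case "-=":   return SUB;
                case "<<=":  return LSH;
                case ">>=":  return RSH;
                case ">>>=": return USH;
                case "&=":   return BWAND;
                case "^=":   return BWXOR;
                case "|=":   return BWOR;
                default:     return 0;   // plain '=': no operator token
            }
        }
    }
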
metadata.createExtNodeMetadata(parent, newctx); - visit(newctx); - } else if (stringctx != null) { - metadata.createExtNodeMetadata(ctx, stringctx); - visit(stringctx); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - - parentemd.statement = false; - - if (dotctx != null) { - --parentemd.scope; - - metadata.createExtNodeMetadata(parent, dotctx); - visit(dotctx); - } else if (bracectx != null) { - --parentemd.scope; - - metadata.createExtNodeMetadata(parent, bracectx); - visit(bracectx); - } + external.processExtprec(ctx); return null; } @Override public Void visitExtcast(final ExtcastContext ctx) { - final Metadata.ExtNodeMetadata castenmd = metadata.getExtNodeMetadata(ctx); - final ParserRuleContext parent = castenmd.parent; - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(parent); - - final ExtprecContext precctx = ctx.extprec(); - final ExtcastContext castctx = ctx.extcast(); - final ExttypeContext typectx = ctx.exttype(); - final ExtvarContext varctx = ctx.extvar(); - final ExtnewContext newctx = ctx.extnew(); - final ExtstringContext stringctx = ctx.extstring(); - - if (precctx != null) { - metadata.createExtNodeMetadata(parent, precctx); - visit(precctx); - } else if (castctx != null) { - metadata.createExtNodeMetadata(parent, castctx); - visit(castctx); - } else if (typectx != null) { - metadata.createExtNodeMetadata(parent, typectx); - visit(typectx); - } else if (varctx != null) { - metadata.createExtNodeMetadata(parent, varctx); - visit(varctx); - } else if (newctx != null) { - metadata.createExtNodeMetadata(parent, newctx); - visit(newctx); - } else if (stringctx != null) { - metadata.createExtNodeMetadata(ctx, stringctx); - visit(stringctx); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - - final DecltypeContext declctx = ctx.decltype(); - final Metadata.ExpressionMetadata declemd = metadata.createExpressionMetadata(declctx); - visit(declctx); - - castenmd.castTo = getLegalCast(ctx, parentemd.current, declemd.from, true); - castenmd.type = declemd.from; - parentemd.current = declemd.from; - parentemd.statement = false; + external.processExtcast(ctx); return null; } @Override public Void visitExtbrace(final ExtbraceContext ctx) { - final Metadata.ExtNodeMetadata braceenmd = metadata.getExtNodeMetadata(ctx); - final ParserRuleContext parent = braceenmd.parent; - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(parent); - - final boolean array = parentemd.current.sort == Sort.ARRAY; - final boolean def = parentemd.current.sort == Sort.DEF; - boolean map = false; - boolean list = false; - - try { - parentemd.current.clazz.asSubclass(Map.class); - map = true; - } catch (ClassCastException exception) { - // Do nothing. - } - - try { - parentemd.current.clazz.asSubclass(List.class); - list = true; - } catch (ClassCastException exception) { - // Do nothing. - } - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - braceenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null; - - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - - if (array || def) { - expremd.to = array ? definition.intType : definition.objectType; - visit(exprctx); - markCast(expremd); - - braceenmd.target = "#brace"; - braceenmd.type = def ? 
definition.defType : - definition.getType(parentemd.current.struct, parentemd.current.type.getDimensions() - 1); - analyzeLoadStoreExternal(ctx); - parentemd.current = braceenmd.type; - - if (dotctx != null) { - metadata.createExtNodeMetadata(parent, dotctx); - visit(dotctx); - } else if (bracectx != null) { - metadata.createExtNodeMetadata(parent, bracectx); - visit(bracectx); - } - } else { - final boolean store = braceenmd.last && parentemd.storeExpr != null; - final boolean get = parentemd.read || parentemd.token > 0 || !braceenmd.last; - final boolean set = braceenmd.last && store; - - Method getter; - Method setter; - Type valuetype; - Type settype; - - if (map) { - getter = parentemd.current.struct.methods.get("get"); - setter = parentemd.current.struct.methods.get("put"); - - if (getter != null && (getter.rtn.sort == Sort.VOID || getter.arguments.size() != 1)) { - throw new IllegalArgumentException(Metadata.error(ctx) + - "Illegal map get shortcut for type [" + parentemd.current.name + "]."); - } - - if (setter != null && setter.arguments.size() != 2) { - throw new IllegalArgumentException(Metadata.error(ctx) + - "Illegal map set shortcut for type [" + parentemd.current.name + "]."); - } - - if (getter != null && setter != null && (!getter.arguments.get(0).equals(setter.arguments.get(0)) - || !getter.rtn.equals(setter.arguments.get(1)))) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Shortcut argument types must match."); - } - - valuetype = setter != null ? setter.arguments.get(0) : getter != null ? getter.arguments.get(0) : null; - settype = setter == null ? null : setter.arguments.get(1); - } else if (list) { - getter = parentemd.current.struct.methods.get("get"); - setter = parentemd.current.struct.methods.get("set"); - - if (getter != null && (getter.rtn.sort == Sort.VOID || getter.arguments.size() != 1 || - getter.arguments.get(0).sort != Sort.INT)) { - throw new IllegalArgumentException(Metadata.error(ctx) + - "Illegal list get shortcut for type [" + parentemd.current.name + "]."); - } - - if (setter != null && (setter.arguments.size() != 2 || setter.arguments.get(0).sort != Sort.INT)) { - throw new IllegalArgumentException(Metadata.error(ctx) + - "Illegal list set shortcut for type [" + parentemd.current.name + "]."); - } - - if (getter != null && setter != null && (!getter.arguments.get(0).equals(setter.arguments.get(0)) - || !getter.rtn.equals(setter.arguments.get(1)))) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Shortcut argument types must match."); - } - - valuetype = definition.intType; - settype = setter == null ? null : setter.arguments.get(1); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - - if ((get || set) && (!get || getter != null) && (!set || setter != null)) { - expremd.to = valuetype; - visit(exprctx); - markCast(expremd); - - braceenmd.target = new Object[] {getter, setter, true, null}; - braceenmd.type = get ? getter.rtn : settype; - analyzeLoadStoreExternal(ctx); - parentemd.current = get ? 
getter.rtn : setter.rtn; - } - } - - if (braceenmd.target == null) { - throw new IllegalArgumentException(Metadata.error(ctx) + - "Attempting to address a non-array type [" + parentemd.current.name + "] as an array."); - } + external.processExtbrace(ctx); return null; } @Override public Void visitExtdot(final ExtdotContext ctx) { - final Metadata.ExtNodeMetadata dotemnd = metadata.getExtNodeMetadata(ctx); - final ParserRuleContext parent = dotemnd.parent; - - final ExtcallContext callctx = ctx.extcall(); - final ExtfieldContext fieldctx = ctx.extfield(); - - if (callctx != null) { - metadata.createExtNodeMetadata(parent, callctx); - visit(callctx); - } else if (fieldctx != null) { - metadata.createExtNodeMetadata(parent, fieldctx); - visit(fieldctx); - } + external.processExtdot(ctx); return null; } @Override public Void visitExttype(final ExttypeContext ctx) { - final Metadata.ExtNodeMetadata typeenmd = metadata.getExtNodeMetadata(ctx); - final ParserRuleContext parent = typeenmd.parent; - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(parent); - - if (parentemd.current != null) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Unexpected static type."); - } - - final String typestr = ctx.TYPE().getText(); - typeenmd.type = definition.getType(typestr); - parentemd.current = typeenmd.type; - parentemd.statik = true; - - final ExtdotContext dotctx = ctx.extdot(); - metadata.createExtNodeMetadata(parent, dotctx); - visit(dotctx); + external.processExttype(ctx); return null; } @Override public Void visitExtcall(final ExtcallContext ctx) { - final Metadata.ExtNodeMetadata callenmd = metadata.getExtNodeMetadata(ctx); - final ParserRuleContext parent = callenmd.parent; - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(parent); - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - callenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null; - - final String name = ctx.EXTID().getText(); - - if (parentemd.current.sort == Sort.ARRAY) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Unexpected call [" + name + "] on an array."); - } else if (callenmd.last && parentemd.storeExpr != null) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Cannot assign a value to a call [" + name + "]."); - } - - final Struct struct = parentemd.current.struct; - final List arguments = ctx.arguments().expression(); - final int size = arguments.size(); - Type[] types; - - final Method method = parentemd.statik ? 
struct.functions.get(name) : struct.methods.get(name); - final boolean def = parentemd.current.sort == Sort.DEF; - - if (method == null && !def) { - throw new IllegalArgumentException( - Metadata.error(ctx) + "Unknown call [" + name + "] on type [" + struct.name + "]."); - } else if (method != null) { - types = new Type[method.arguments.size()]; - method.arguments.toArray(types); - - callenmd.target = method; - callenmd.type = method.rtn; - parentemd.statement = !parentemd.read && callenmd.last; - parentemd.current = method.rtn; - - if (size != types.length) { - throw new IllegalArgumentException(Metadata.error(ctx) + "When calling [" + name + "] on type " + - "[" + struct.name + "] expected [" + types.length + "] arguments," + - " but found [" + arguments.size() + "]."); - } - } else { - types = new Type[arguments.size()]; - Arrays.fill(types, definition.defType); - - callenmd.target = name; - callenmd.type = definition.defType; - parentemd.statement = !parentemd.read && callenmd.last; - parentemd.current = callenmd.type; - } - - for (int argument = 0; argument < size; ++argument) { - final ExpressionContext exprctx = metadata.updateExpressionTree(arguments.get(argument)); - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - expremd.to = types[argument]; - visit(exprctx); - markCast(expremd); - } - - parentemd.statik = false; - - if (dotctx != null) { - metadata.createExtNodeMetadata(parent, dotctx); - visit(dotctx); - } else if (bracectx != null) { - metadata.createExtNodeMetadata(parent, bracectx); - visit(bracectx); - } + external.processExtcall(ctx); return null; } @Override public Void visitExtvar(final ExtvarContext ctx) { - final Metadata.ExtNodeMetadata varenmd = metadata.getExtNodeMetadata(ctx); - final ParserRuleContext parent = varenmd.parent; - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(parent); - - final String name = ctx.ID().getText(); - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - if (parentemd.current != null) { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected variable [" + name + "] load."); - } - - varenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null; - - final Variable variable = getVariable(name); - - if (variable == null) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Unknown variable [" + name + "]."); - } - - varenmd.target = variable.slot; - varenmd.type = variable.type; - analyzeLoadStoreExternal(ctx); - parentemd.current = varenmd.type; - - if (dotctx != null) { - metadata.createExtNodeMetadata(parent, dotctx); - visit(dotctx); - } else if (bracectx != null) { - metadata.createExtNodeMetadata(parent, bracectx); - visit(bracectx); - } + external.processExtvar(ctx); return null; } @Override public Void visitExtfield(final ExtfieldContext ctx) { - final Metadata.ExtNodeMetadata memberenmd = metadata.getExtNodeMetadata(ctx); - final ParserRuleContext parent = memberenmd.parent; - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(parent); - - if (ctx.EXTID() == null && ctx.EXTINTEGER() == null) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Unexpected parser state."); - } - - final String value = ctx.EXTID() == null ? 
ctx.EXTINTEGER().getText() : ctx.EXTID().getText(); - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - memberenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null; - final boolean store = memberenmd.last && parentemd.storeExpr != null; - - if (parentemd.current == null) { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected field [" + value + "] load."); - } - - if (parentemd.current.sort == Sort.ARRAY) { - if ("length".equals(value)) { - if (!parentemd.read) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Must read array field [length]."); - } else if (store) { - throw new IllegalArgumentException( - Metadata.error(ctx) + "Cannot write to read-only array field [length]."); - } - - memberenmd.target = "#length"; - memberenmd.type = definition.intType; - parentemd.current = definition.intType; - } else { - throw new IllegalArgumentException(Metadata.error(ctx) + "Unexpected array field [" + value + "]."); - } - } else if (parentemd.current.sort == Sort.DEF) { - memberenmd.target = value; - memberenmd.type = definition.defType; - analyzeLoadStoreExternal(ctx); - parentemd.current = memberenmd.type; - } else { - final Struct struct = parentemd.current.struct; - final Field field = parentemd.statik ? struct.statics.get(value) : struct.members.get(value); - - if (field != null) { - if (store && java.lang.reflect.Modifier.isFinal(field.reflect.getModifiers())) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Cannot write to read-only" + - " field [" + value + "] for type [" + struct.name + "]."); - } - - memberenmd.target = field; - memberenmd.type = field.type; - analyzeLoadStoreExternal(ctx); - parentemd.current = memberenmd.type; - } else { - final boolean get = parentemd.read || parentemd.token > 0 || !memberenmd.last; - final boolean set = memberenmd.last && store; - - Method getter = struct.methods.get("get" + Character.toUpperCase(value.charAt(0)) + value.substring(1)); - Method setter = struct.methods.get("set" + Character.toUpperCase(value.charAt(0)) + value.substring(1)); - Object constant = null; - - if (getter != null && (getter.rtn.sort == Sort.VOID || !getter.arguments.isEmpty())) { - throw new IllegalArgumentException(Metadata.error(ctx) + - "Illegal get shortcut on field [" + value + "] for type [" + struct.name + "]."); - } - - if (setter != null && (setter.rtn.sort != Sort.VOID || setter.arguments.size() != 1)) { - throw new IllegalArgumentException(Metadata.error(ctx) + - "Illegal set shortcut on field [" + value + "] for type [" + struct.name + "]."); - } - - Type settype = setter == null ? 
null : setter.arguments.get(0); - - if (getter == null && setter == null) { - if (ctx.EXTID() != null) { - try { - parentemd.current.clazz.asSubclass(Map.class); - - getter = parentemd.current.struct.methods.get("get"); - setter = parentemd.current.struct.methods.get("put"); - - if (getter != null && (getter.rtn.sort == Sort.VOID || getter.arguments.size() != 1 || - getter.arguments.get(0).sort != Sort.STRING)) { - throw new IllegalArgumentException(Metadata.error(ctx) + - "Illegal map get shortcut [" + value + "] for type [" + struct.name + "]."); - } - - if (setter != null && (setter.arguments.size() != 2 || - setter.arguments.get(0).sort != Sort.STRING)) { - throw new IllegalArgumentException(Metadata.error(ctx) + - "Illegal map set shortcut [" + value + "] for type [" + struct.name + "]."); - } - - if (getter != null && setter != null && !getter.rtn.equals(setter.arguments.get(1))) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Shortcut argument types must match."); - } - - settype = setter == null ? null : setter.arguments.get(1); - constant = value; - } catch (ClassCastException exception) { - //Do nothing. - } - } else if (ctx.EXTINTEGER() != null) { - try { - parentemd.current.clazz.asSubclass(List.class); - - getter = parentemd.current.struct.methods.get("get"); - setter = parentemd.current.struct.methods.get("set"); - - if (getter != null && (getter.rtn.sort == Sort.VOID || getter.arguments.size() != 1 || - getter.arguments.get(0).sort != Sort.INT)) { - throw new IllegalArgumentException(Metadata.error(ctx) + - "Illegal list get shortcut [" + value + "] for type [" + struct.name + "]."); - } - - if (setter != null && (setter.rtn.sort != Sort.VOID || setter.arguments.size() != 2 || - setter.arguments.get(0).sort != Sort.INT)) { - throw new IllegalArgumentException(Metadata.error(ctx) + - "Illegal list set shortcut [" + value + "] for type [" + struct.name + "]."); - } - - if (getter != null && setter != null && !getter.rtn.equals(setter.arguments.get(1))) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Shortcut argument types must match."); - } - - settype = setter == null ? null : setter.arguments.get(1); - - try { - constant = Integer.parseInt(value); - } catch (NumberFormatException exception) { - throw new IllegalArgumentException(Metadata.error(ctx) + - "Illegal list shortcut value [" + value + "]."); - } - } catch (ClassCastException exception) { - //Do nothing. - } - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected parser state."); - } - } - - if ((get || set) && (!get || getter != null) && (!set || setter != null)) { - memberenmd.target = new Object[] {getter, setter, constant != null, constant}; - memberenmd.type = get ? getter.rtn : settype; - analyzeLoadStoreExternal(ctx); - parentemd.current = get ? 
getter.rtn : setter.rtn; - } - } - - if (memberenmd.target == null) { - throw new IllegalArgumentException( - Metadata.error(ctx) + "Unknown field [" + value + "] for type [" + struct.name + "]."); - } - } - - parentemd.statik = false; - - if (dotctx != null) { - metadata.createExtNodeMetadata(parent, dotctx); - visit(dotctx); - } else if (bracectx != null) { - metadata.createExtNodeMetadata(parent, bracectx); - visit(bracectx); - } + external.processExtfield(ctx); return null; } @Override - public Void visitExtnew(ExtnewContext ctx) { - final Metadata.ExtNodeMetadata newenmd = metadata.getExtNodeMetadata(ctx); - final ParserRuleContext parent = newenmd.parent; - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(parent); - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - newenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null; - - final String name = ctx.TYPE().getText(); - final Struct struct = definition.structs.get(name); - - if (parentemd.current != null) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Unexpected new call."); - } else if (struct == null) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Specified type [" + name + "] not found."); - } else if (newenmd.last && parentemd.storeExpr != null) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Cannot assign a value to a new call."); - } - - final boolean newclass = ctx.arguments() != null; - final boolean newarray = !ctx.expression().isEmpty(); - - final List arguments = newclass ? ctx.arguments().expression() : ctx.expression(); - final int size = arguments.size(); - - Type[] types; - - if (newarray) { - if (!parentemd.read) { - throw new IllegalArgumentException(Metadata.error(ctx) + "A newly created array must be assigned."); - } - - types = new Type[size]; - Arrays.fill(types, definition.intType); - - newenmd.target = "#makearray"; - - if (size > 1) { - newenmd.type = definition.getType(struct, size); - parentemd.current = newenmd.type; - } else if (size == 1) { - newenmd.type = definition.getType(struct, 0); - parentemd.current = definition.getType(struct, 1); - } else { - throw new IllegalArgumentException(Metadata.error(ctx) + "A newly created array cannot have zero dimensions."); - } - } else if (newclass) { - final Constructor constructor = struct.constructors.get("new"); - - if (constructor != null) { - types = new Type[constructor.arguments.size()]; - constructor.arguments.toArray(types); - - newenmd.target = constructor; - newenmd.type = definition.getType(struct, 0); - parentemd.statement = !parentemd.read && newenmd.last; - parentemd.current = newenmd.type; - } else { - throw new IllegalArgumentException( - Metadata.error(ctx) + "Unknown new call on type [" + struct.name + "]."); - } - } else { - throw new IllegalArgumentException(Metadata.error(ctx) + "Unknown parser state."); - } - - if (size != types.length) { - throw new IllegalArgumentException(Metadata.error(ctx) + "When calling [" + name + "] on type " + - "[" + struct.name + "] expected [" + types.length + "] arguments," + - " but found [" + arguments.size() + "]."); - } - - for (int argument = 0; argument < size; ++argument) { - final ExpressionContext exprctx = metadata.updateExpressionTree(arguments.get(argument)); - final Metadata.ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); - expremd.to = types[argument]; - visit(exprctx); - markCast(expremd); - } - - if (dotctx != null) { - 
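
The removed visitExtnew distinguishes the two shapes of new by what follows the type: arguments() means a constructor call typed by the constructor's declared parameters, while expression dimensions mean a new array whose every dimension must be an int. A sketch of that argument-typing step, with stand-in metadata classes:

    final class NewCallTyping {
        static class Type {}                                        // stand-in
        static class Constructor {                                  // stand-in
            java.util.List<Type> arguments = new java.util.ArrayList<>();
        }

        static Type[] expectedArgs(final boolean newArray, final int dims,
                                   final Constructor ctor, final Type intType) {
            if (newArray) {
                final Type[] types = new Type[dims];
                java.util.Arrays.fill(types, intType);   // one int per dimension
                return types;
            }
            return ctor.arguments.toArray(new Type[0]);  // declared parameters
        }
    }

Both paths produce the types[] array first so that arity can be checked once against it, which is exactly the size != types.length check above.
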
metadata.createExtNodeMetadata(parent, dotctx); - visit(dotctx); - } else if (bracectx != null) { - metadata.createExtNodeMetadata(parent, bracectx); - visit(bracectx); - } + public Void visitExtnew(final ExtnewContext ctx) { + external.processExtnew(ctx); return null; } @Override public Void visitExtstring(final ExtstringContext ctx) { - final Metadata.ExtNodeMetadata memberenmd = metadata.getExtNodeMetadata(ctx); - final ParserRuleContext parent = memberenmd.parent; - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(parent); - - final String string = ctx.STRING().getText(); - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - memberenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null; - final boolean store = memberenmd.last && parentemd.storeExpr != null; - - if (parentemd.current != null) { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected String constant [" + string + "]."); - } - - if (!parentemd.read) { - throw new IllegalArgumentException(Metadata.error(ctx) + "Must read String constant [" + string + "]."); - } else if (store) { - throw new IllegalArgumentException( - Metadata.error(ctx) + "Cannot write to read-only String constant [" + string + "]."); - } - - memberenmd.target = string; - memberenmd.type = definition.stringType; - parentemd.current = definition.stringType; - - if (memberenmd.last) { - parentemd.constant = string; - } - - if (dotctx != null) { - metadata.createExtNodeMetadata(parent, dotctx); - visit(dotctx); - } else if (bracectx != null) { - metadata.createExtNodeMetadata(parent, bracectx); - visit(bracectx); - } + external.processExtstring(ctx); return null; } @Override public Void visitArguments(final ArgumentsContext ctx) { - throw new UnsupportedOperationException(Metadata.error(ctx) + "Unexpected parser state."); + throw new UnsupportedOperationException(AnalyzerUtility.error(ctx) + "Unexpected state."); } @Override - public Void visitIncrement(IncrementContext ctx) { - final Metadata.ExpressionMetadata incremd = metadata.getExpressionMetadata(ctx); - final Sort sort = incremd.to == null ? null : incremd.to.sort; - final boolean positive = ctx.INCR() != null; - - if (incremd.to == null) { - incremd.preConst = positive ? 1 : -1; - incremd.from = definition.intType; - } else { - switch (sort) { - case LONG: - incremd.preConst = positive ? 1L : -1L; - incremd.from = definition.longType; - break; - case FLOAT: - incremd.preConst = positive ? 1.0F : -1.0F; - incremd.from = definition.floatType; - break; - case DOUBLE: - incremd.preConst = positive ? 1.0 : -1.0; - incremd.from = definition.doubleType; - break; - default: - incremd.preConst = positive ? 
1 : -1; - incremd.from = definition.intType; - } - } + public Void visitIncrement(final IncrementContext ctx) { + expression.processIncrement(ctx); return null; } - - private void analyzeLoadStoreExternal(final ParserRuleContext source) { - final Metadata.ExtNodeMetadata extenmd = metadata.getExtNodeMetadata(source); - final ParserRuleContext parent = extenmd.parent; - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(parent); - - if (extenmd.last && parentemd.storeExpr != null) { - final ParserRuleContext store = parentemd.storeExpr; - final Metadata.ExpressionMetadata storeemd = metadata.createExpressionMetadata(parentemd.storeExpr); - final int token = parentemd.token; - - if (token > 0) { - visit(store); - - final boolean add = token == ADD; - final boolean xor = token == BWAND || token == BWXOR || token == BWOR; - final boolean decimal = token == MUL || token == DIV || token == REM || token == SUB; - - extenmd.promote = add ? promoteAdd(extenmd.type, storeemd.from) : - xor ? promoteXor(extenmd.type, storeemd.from) : - promoteNumeric(extenmd.type, storeemd.from, decimal, true); - - if (extenmd.promote == null) { - throw new IllegalArgumentException("Cannot apply compound assignment to " + - "types [" + extenmd.type.name + "] and [" + storeemd.from.name + "]."); - } - - extenmd.castFrom = getLegalCast(source, extenmd.type, extenmd.promote, false); - extenmd.castTo = getLegalCast(source, extenmd.promote, extenmd.type, true); - - storeemd.to = add && extenmd.promote.sort == Sort.STRING ? storeemd.from : extenmd.promote; - markCast(storeemd); - } else { - storeemd.to = extenmd.type; - visit(store); - markCast(storeemd); - } - } - } - - private void markCast(final Metadata.ExpressionMetadata emd) { - if (emd.from == null) { - throw new IllegalStateException(Metadata.error(emd.source) + "From cast type should never be null."); - } - - if (emd.to != null) { - emd.cast = getLegalCast(emd.source, emd.from, emd.to, emd.explicit || !emd.typesafe); - - if (emd.preConst != null && emd.to.sort.constant) { - emd.postConst = constCast(emd.source, emd.preConst, emd.cast); - } - } else { - throw new IllegalStateException(Metadata.error(emd.source) + "To cast type should never be null."); - } - } - - private Cast getLegalCast(final ParserRuleContext source, final Type from, final Type to, final boolean explicit) { - final Cast cast = new Cast(from, to); - - if (from.equals(to)) { - return cast; - } - - if (from.sort == Sort.DEF && to.sort != Sort.VOID || from.sort != Sort.VOID && to.sort == Sort.DEF) { - final Transform transform = definition.transforms.get(cast); - - if (transform != null) { - return transform; - } - - return cast; - } - - switch (from.sort) { - case BOOL: - switch (to.sort) { - case OBJECT: - case BOOL_OBJ: - return checkTransform(source, cast); - } - - break; - case BYTE: - switch (to.sort) { - case SHORT: - case INT: - case LONG: - case FLOAT: - case DOUBLE: - return cast; - case CHAR: - if (explicit) - return cast; - - break; - case OBJECT: - case NUMBER: - case BYTE_OBJ: - case SHORT_OBJ: - case INT_OBJ: - case LONG_OBJ: - case FLOAT_OBJ: - case DOUBLE_OBJ: - return checkTransform(source, cast); - case CHAR_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case SHORT: - switch (to.sort) { - case INT: - case LONG: - case FLOAT: - case DOUBLE: - return cast; - case BYTE: - case CHAR: - if (explicit) - return cast; - - break; - case OBJECT: - case NUMBER: - case SHORT_OBJ: - case INT_OBJ: - case LONG_OBJ: - case FLOAT_OBJ: - 
case DOUBLE_OBJ: - return checkTransform(source, cast); - case BYTE_OBJ: - case CHAR_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case CHAR: - switch (to.sort) { - case INT: - case LONG: - case FLOAT: - case DOUBLE: - return cast; - case BYTE: - case SHORT: - if (explicit) - return cast; - - break; - case OBJECT: - case NUMBER: - case CHAR_OBJ: - case INT_OBJ: - case LONG_OBJ: - case FLOAT_OBJ: - case DOUBLE_OBJ: - return checkTransform(source, cast); - case BYTE_OBJ: - case SHORT_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case INT: - switch (to.sort) { - case LONG: - case FLOAT: - case DOUBLE: - return cast; - case BYTE: - case SHORT: - case CHAR: - if (explicit) - return cast; - - break; - case OBJECT: - case NUMBER: - case INT_OBJ: - case LONG_OBJ: - case FLOAT_OBJ: - case DOUBLE_OBJ: - return checkTransform(source, cast); - case BYTE_OBJ: - case SHORT_OBJ: - case CHAR_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case LONG: - switch (to.sort) { - case FLOAT: - case DOUBLE: - return cast; - case BYTE: - case SHORT: - case CHAR: - case INT: - if (explicit) - return cast; - - break; - case OBJECT: - case NUMBER: - case LONG_OBJ: - case FLOAT_OBJ: - case DOUBLE_OBJ: - return checkTransform(source, cast); - case BYTE_OBJ: - case SHORT_OBJ: - case CHAR_OBJ: - case INT_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case FLOAT: - switch (to.sort) { - case DOUBLE: - return cast; - case BYTE: - case SHORT: - case CHAR: - case INT: - case LONG: - if (explicit) - return cast; - - break; - case OBJECT: - case NUMBER: - case FLOAT_OBJ: - case DOUBLE_OBJ: - return checkTransform(source, cast); - case BYTE_OBJ: - case SHORT_OBJ: - case CHAR_OBJ: - case INT_OBJ: - case LONG_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case DOUBLE: - switch (to.sort) { - case BYTE: - case SHORT: - case CHAR: - case INT: - case LONG: - case FLOAT: - if (explicit) - return cast; - - break; - case OBJECT: - case NUMBER: - case DOUBLE_OBJ: - return checkTransform(source, cast); - case BYTE_OBJ: - case SHORT_OBJ: - case CHAR_OBJ: - case INT_OBJ: - case LONG_OBJ: - case FLOAT_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case OBJECT: - case NUMBER: - switch (to.sort) { - case BYTE: - case SHORT: - case CHAR: - case INT: - case LONG: - case FLOAT: - case DOUBLE: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case BOOL_OBJ: - switch (to.sort) { - case BOOL: - return checkTransform(source, cast); - } - - break; - case BYTE_OBJ: - switch (to.sort) { - case BYTE: - case SHORT: - case INT: - case LONG: - case FLOAT: - case DOUBLE: - case SHORT_OBJ: - case INT_OBJ: - case LONG_OBJ: - case FLOAT_OBJ: - case DOUBLE_OBJ: - return checkTransform(source, cast); - case CHAR: - case CHAR_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case SHORT_OBJ: - switch (to.sort) { - case SHORT: - case INT: - case LONG: - case FLOAT: - case DOUBLE: - case INT_OBJ: - case LONG_OBJ: - case FLOAT_OBJ: - case DOUBLE_OBJ: - return checkTransform(source, cast); - case BYTE: - case CHAR: - case BYTE_OBJ: - case CHAR_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case CHAR_OBJ: - switch (to.sort) { - case CHAR: - case INT: - case LONG: - case FLOAT: - case DOUBLE: - case INT_OBJ: - case 
LONG_OBJ: - case FLOAT_OBJ: - case DOUBLE_OBJ: - return checkTransform(source, cast); - case BYTE: - case SHORT: - case BYTE_OBJ: - case SHORT_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case INT_OBJ: - switch (to.sort) { - case INT: - case LONG: - case FLOAT: - case DOUBLE: - case LONG_OBJ: - case FLOAT_OBJ: - case DOUBLE_OBJ: - return checkTransform(source, cast); - case BYTE: - case SHORT: - case CHAR: - case BYTE_OBJ: - case SHORT_OBJ: - case CHAR_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case LONG_OBJ: - switch (to.sort) { - case LONG: - case FLOAT: - case DOUBLE: - case FLOAT_OBJ: - case DOUBLE_OBJ: - return checkTransform(source, cast); - case BYTE: - case SHORT: - case CHAR: - case INT: - case BYTE_OBJ: - case SHORT_OBJ: - case CHAR_OBJ: - case INT_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case FLOAT_OBJ: - switch (to.sort) { - case FLOAT: - case DOUBLE: - case DOUBLE_OBJ: - return checkTransform(source, cast); - case BYTE: - case SHORT: - case CHAR: - case INT: - case LONG: - case BYTE_OBJ: - case SHORT_OBJ: - case CHAR_OBJ: - case INT_OBJ: - case LONG_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - case DOUBLE_OBJ: - switch (to.sort) { - case DOUBLE: - return checkTransform(source, cast); - case BYTE: - case SHORT: - case CHAR: - case INT: - case LONG: - case FLOAT: - case BYTE_OBJ: - case SHORT_OBJ: - case CHAR_OBJ: - case INT_OBJ: - case LONG_OBJ: - case FLOAT_OBJ: - if (explicit) - return checkTransform(source, cast); - - break; - } - - break; - } - - try { - from.clazz.asSubclass(to.clazz); - - return cast; - } catch (final ClassCastException cce0) { - try { - if (explicit) { - to.clazz.asSubclass(from.clazz); - - return cast; - } else { - throw new ClassCastException( - Metadata.error(source) + "Cannot cast from [" + from.name + "] to [" + to.name + "]."); - } - } catch (final ClassCastException cce1) { - throw new ClassCastException( - Metadata.error(source) + "Cannot cast from [" + from.name + "] to [" + to.name + "]."); - } - } - } - - private Transform checkTransform(final ParserRuleContext source, final Cast cast) { - final Transform transform = definition.transforms.get(cast); - - if (transform == null) { - throw new ClassCastException( - Metadata.error(source) + "Cannot cast from [" + cast.from.name + "] to [" + cast.to.name + "]."); - } - - return transform; - } - - private Object constCast(final ParserRuleContext source, final Object constant, final Cast cast) { - if (cast instanceof Transform) { - final Transform transform = (Transform)cast; - return invokeTransform(source, transform, constant); - } else { - final Sort fsort = cast.from.sort; - final Sort tsort = cast.to.sort; - - if (fsort == tsort) { - return constant; - } else if (fsort.numeric && tsort.numeric) { - Number number; - - if (fsort == Sort.CHAR) { - number = (int)(char)constant; - } else { - number = (Number)constant; - } - - switch (tsort) { - case BYTE: return number.byteValue(); - case SHORT: return number.shortValue(); - case CHAR: return (char)number.intValue(); - case INT: return number.intValue(); - case LONG: return number.longValue(); - case FLOAT: return number.floatValue(); - case DOUBLE: return number.doubleValue(); - default: - throw new IllegalStateException(Metadata.error(source) + "Expected numeric type for cast."); - } - } else { - throw new IllegalStateException(Metadata.error(source) + "No valid constant 
cast from " + - "[" + cast.from.clazz.getCanonicalName() + "] to " + - "[" + cast.to.clazz.getCanonicalName() + "]."); - } - } - } - - private Object invokeTransform(final ParserRuleContext source, final Transform transform, final Object object) { - final Method method = transform.method; - final java.lang.reflect.Method jmethod = method.reflect; - final int modifiers = jmethod.getModifiers(); - - try { - if (java.lang.reflect.Modifier.isStatic(modifiers)) { - return jmethod.invoke(null, object); - } else { - return jmethod.invoke(object); - } - } catch (IllegalAccessException | IllegalArgumentException | - java.lang.reflect.InvocationTargetException | NullPointerException | - ExceptionInInitializerError exception) { - throw new IllegalStateException(Metadata.error(source) + "Unable to invoke transform to cast constant from " + - "[" + transform.from.name + "] to [" + transform.to.name + "]."); - } - } - - private Type promoteNumeric(final Type from, boolean decimal, boolean primitive) { - final Sort sort = from.sort; - - if (sort == Sort.DEF) { - return definition.defType; - } else if ((sort == Sort.DOUBLE || sort == Sort.DOUBLE_OBJ || sort == Sort.NUMBER) && decimal) { - return primitive ? definition.doubleType : definition.doubleobjType; - } else if ((sort == Sort.FLOAT || sort == Sort.FLOAT_OBJ) && decimal) { - return primitive ? definition.floatType : definition.floatobjType; - } else if (sort == Sort.LONG || sort == Sort.LONG_OBJ || sort == Sort.NUMBER) { - return primitive ? definition.longType : definition.longobjType; - } else if (sort.numeric) { - return primitive ? definition.intType : definition.intobjType; - } - - return null; - } - - private Type promoteNumeric(final Type from0, final Type from1, boolean decimal, boolean primitive) { - final Sort sort0 = from0.sort; - final Sort sort1 = from1.sort; - - if (sort0 == Sort.DEF || sort1 == Sort.DEF) { - return definition.defType; - } - - if (decimal) { - if (sort0 == Sort.DOUBLE || sort0 == Sort.DOUBLE_OBJ || sort0 == Sort.NUMBER || - sort1 == Sort.DOUBLE || sort1 == Sort.DOUBLE_OBJ || sort1 == Sort.NUMBER) { - return primitive ? definition.doubleType : definition.doubleobjType; - } else if (sort0 == Sort.FLOAT || sort0 == Sort.FLOAT_OBJ || sort1 == Sort.FLOAT || sort1 == Sort.FLOAT_OBJ) { - return primitive ? definition.floatType : definition.floatobjType; - } - } - - if (sort0 == Sort.LONG || sort0 == Sort.LONG_OBJ || sort0 == Sort.NUMBER || - sort1 == Sort.LONG || sort1 == Sort.LONG_OBJ || sort1 == Sort.NUMBER) { - return primitive ? definition.longType : definition.longobjType; - } else if (sort0.numeric && sort1.numeric) { - return primitive ? 
definition.intType : definition.intobjType; - } - - return null; - } - - private Type promoteAdd(final Type from0, final Type from1) { - final Sort sort0 = from0.sort; - final Sort sort1 = from1.sort; - - if (sort0 == Sort.STRING || sort1 == Sort.STRING) { - return definition.stringType; - } - - return promoteNumeric(from0, from1, true, true); - } - - private Type promoteXor(final Type from0, final Type from1) { - final Sort sort0 = from0.sort; - final Sort sort1 = from1.sort; - - if (sort0.bool || sort1.bool) { - return definition.booleanType; - } - - return promoteNumeric(from0, from1, false, true); - } - - private Type promoteEquality(final Type from0, final Type from1) { - final Sort sort0 = from0.sort; - final Sort sort1 = from1.sort; - - if (sort0 == Sort.DEF || sort1 == Sort.DEF) { - return definition.defType; - } - - final boolean primitive = sort0.primitive && sort1.primitive; - - if (sort0.bool && sort1.bool) { - return primitive ? definition.booleanType : definition.booleanobjType; - } - - if (sort0.numeric && sort1.numeric) { - return promoteNumeric(from0, from1, true, primitive); - } - - return definition.objectType; - } - - private Type promoteReference(final Type from0, final Type from1) { - final Sort sort0 = from0.sort; - final Sort sort1 = from1.sort; - - if (sort0 == Sort.DEF || sort1 == Sort.DEF) { - return definition.defType; - } - - if (sort0.primitive && sort1.primitive) { - if (sort0.bool && sort1.bool) { - return definition.booleanType; - } - - if (sort0.numeric && sort1.numeric) { - return promoteNumeric(from0, from1, true, true); - } - } - - return definition.objectType; - } - - private Type promoteConditional(final Type from0, final Type from1, final Object const0, final Object const1) { - if (from0.equals(from1)) { - return from0; - } - - final Sort sort0 = from0.sort; - final Sort sort1 = from1.sort; - - if (sort0 == Sort.DEF || sort1 == Sort.DEF) { - return definition.defType; - } - - final boolean primitive = sort0.primitive && sort1.primitive; - - if (sort0.bool && sort1.bool) { - return primitive ? definition.booleanType : definition.booleanobjType; - } - - if (sort0.numeric && sort1.numeric) { - if (sort0 == Sort.DOUBLE || sort0 == Sort.DOUBLE_OBJ || sort1 == Sort.DOUBLE || sort1 == Sort.DOUBLE_OBJ) { - return primitive ? definition.doubleType : definition.doubleobjType; - } else if (sort0 == Sort.FLOAT || sort0 == Sort.FLOAT_OBJ || sort1 == Sort.FLOAT || sort1 == Sort.FLOAT_OBJ) { - return primitive ? definition.floatType : definition.floatobjType; - } else if (sort0 == Sort.LONG || sort0 == Sort.LONG_OBJ || sort1 == Sort.LONG || sort1 == Sort.LONG_OBJ) { - return sort0.primitive && sort1.primitive ? definition.longType : definition.longobjType; - } else { - if (sort0 == Sort.BYTE || sort0 == Sort.BYTE_OBJ) { - if (sort1 == Sort.BYTE || sort1 == Sort.BYTE_OBJ) { - return primitive ? definition.byteType : definition.byteobjType; - } else if (sort1 == Sort.SHORT || sort1 == Sort.SHORT_OBJ) { - if (const1 != null) { - final short constant = (short)const1; - - if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { - return primitive ? definition.byteType : definition.byteobjType; - } - } - - return primitive ? definition.shortType : definition.shortobjType; - } else if (sort1 == Sort.CHAR || sort1 == Sort.CHAR_OBJ) { - return primitive ? 
definition.intType : definition.intobjType; - } else if (sort1 == Sort.INT || sort1 == Sort.INT_OBJ) { - if (const1 != null) { - final int constant = (int)const1; - - if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { - return primitive ? definition.byteType : definition.byteobjType; - } - } - - return primitive ? definition.intType : definition.intobjType; - } - } else if (sort0 == Sort.SHORT || sort0 == Sort.SHORT_OBJ) { - if (sort1 == Sort.BYTE || sort1 == Sort.BYTE_OBJ) { - if (const0 != null) { - final short constant = (short)const0; - - if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { - return primitive ? definition.byteType : definition.byteobjType; - } - } - - return primitive ? definition.shortType : definition.shortobjType; - } else if (sort1 == Sort.SHORT || sort1 == Sort.SHORT_OBJ) { - return primitive ? definition.shortType : definition.shortobjType; - } else if (sort1 == Sort.CHAR || sort1 == Sort.CHAR_OBJ) { - return primitive ? definition.intType : definition.intobjType; - } else if (sort1 == Sort.INT || sort1 == Sort.INT_OBJ) { - if (const1 != null) { - final int constant = (int)const1; - - if (constant <= Short.MAX_VALUE && constant >= Short.MIN_VALUE) { - return primitive ? definition.shortType : definition.shortobjType; - } - } - - return primitive ? definition.intType : definition.intobjType; - } - } else if (sort0 == Sort.CHAR || sort0 == Sort.CHAR_OBJ) { - if (sort1 == Sort.BYTE || sort1 == Sort.BYTE_OBJ) { - return primitive ? definition.intType : definition.intobjType; - } else if (sort1 == Sort.SHORT || sort1 == Sort.SHORT_OBJ) { - return primitive ? definition.intType : definition.intobjType; - } else if (sort1 == Sort.CHAR || sort1 == Sort.CHAR_OBJ) { - return primitive ? definition.charType : definition.charobjType; - } else if (sort1 == Sort.INT || sort1 == Sort.INT_OBJ) { - if (const1 != null) { - final int constant = (int)const1; - - if (constant <= Character.MAX_VALUE && constant >= Character.MIN_VALUE) { - return primitive ? definition.byteType : definition.byteobjType; - } - } - - return primitive ? definition.intType : definition.intobjType; - } - } else if (sort0 == Sort.INT || sort0 == Sort.INT_OBJ) { - if (sort1 == Sort.BYTE || sort1 == Sort.BYTE_OBJ) { - if (const0 != null) { - final int constant = (int)const0; - - if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { - return primitive ? definition.byteType : definition.byteobjType; - } - } - - return primitive ? definition.intType : definition.intobjType; - } else if (sort1 == Sort.SHORT || sort1 == Sort.SHORT_OBJ) { - if (const0 != null) { - final int constant = (int)const0; - - if (constant <= Short.MAX_VALUE && constant >= Short.MIN_VALUE) { - return primitive ? definition.byteType : definition.byteobjType; - } - } - - return primitive ? definition.intType : definition.intobjType; - } else if (sort1 == Sort.CHAR || sort1 == Sort.CHAR_OBJ) { - if (const0 != null) { - final int constant = (int)const0; - - if (constant <= Character.MAX_VALUE && constant >= Character.MIN_VALUE) { - return primitive ? definition.byteType : definition.byteobjType; - } - } - - return primitive ? definition.intType : definition.intobjType; - } else if (sort1 == Sort.INT || sort1 == Sort.INT_OBJ) { - return primitive ? definition.intType : definition.intobjType; - } - } - } - } - - final Pair pair = new Pair(from0, from1); - final Type bound = definition.bounds.get(pair); - - return bound == null ? 
definition.objectType : bound; - } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java new file mode 100644 index 00000000000..46a510bc6bb --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerCaster.java @@ -0,0 +1,563 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.elasticsearch.painless.Definition.Cast; +import org.elasticsearch.painless.Definition.Method; +import org.elasticsearch.painless.Definition.Sort; +import org.elasticsearch.painless.Definition.Transform; +import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.Metadata.ExpressionMetadata; + +class AnalyzerCaster { + private final Definition definition; + + AnalyzerCaster(final Definition definition) { + this.definition = definition; + } + + void markCast(final ExpressionMetadata emd) { + if (emd.from == null) { + throw new IllegalStateException(AnalyzerUtility.error(emd.source) + "From cast type should never be null."); + } + + if (emd.to != null) { + emd.cast = getLegalCast(emd.source, emd.from, emd.to, emd.explicit || !emd.typesafe); + + if (emd.preConst != null && emd.to.sort.constant) { + emd.postConst = constCast(emd.source, emd.preConst, emd.cast); + } + } else { + throw new IllegalStateException(AnalyzerUtility.error(emd.source) + "To cast type should never be null."); + } + } + + Cast getLegalCast(final ParserRuleContext source, final Type from, final Type to, final boolean explicit) { + final Cast cast = new Cast(from, to); + + if (from.equals(to)) { + return cast; + } + + if (from.sort == Sort.DEF && to.sort != Sort.VOID || from.sort != Sort.VOID && to.sort == Sort.DEF) { + final Transform transform = definition.transforms.get(cast); + + if (transform != null) { + return transform; + } + + return cast; + } + + switch (from.sort) { + case BOOL: + switch (to.sort) { + case OBJECT: + case BOOL_OBJ: + return checkTransform(source, cast); + } + + break; + case BYTE: + switch (to.sort) { + case SHORT: + case INT: + case LONG: + case FLOAT: + case DOUBLE: + return cast; + case CHAR: + if (explicit) + return cast; + + break; + case OBJECT: + case NUMBER: + case BYTE_OBJ: + case SHORT_OBJ: + case INT_OBJ: + case LONG_OBJ: + case FLOAT_OBJ: + case DOUBLE_OBJ: + return checkTransform(source, cast); + case CHAR_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case SHORT: + switch (to.sort) { + case INT: + case LONG: + case FLOAT: + case DOUBLE: + return cast; + case BYTE: + case CHAR: + if (explicit) + return cast; + + break; + case OBJECT: + case NUMBER: 
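+ // Boxing or widening a short to Short, a wider boxed type, Number, or Object is only
+ // legal when the definition registers a transform for it, so these cases defer to checkTransform.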
+ case SHORT_OBJ: + case INT_OBJ: + case LONG_OBJ: + case FLOAT_OBJ: + case DOUBLE_OBJ: + return checkTransform(source, cast); + case BYTE_OBJ: + case CHAR_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case CHAR: + switch (to.sort) { + case INT: + case LONG: + case FLOAT: + case DOUBLE: + return cast; + case BYTE: + case SHORT: + if (explicit) + return cast; + + break; + case OBJECT: + case NUMBER: + case CHAR_OBJ: + case INT_OBJ: + case LONG_OBJ: + case FLOAT_OBJ: + case DOUBLE_OBJ: + return checkTransform(source, cast); + case BYTE_OBJ: + case SHORT_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case INT: + switch (to.sort) { + case LONG: + case FLOAT: + case DOUBLE: + return cast; + case BYTE: + case SHORT: + case CHAR: + if (explicit) + return cast; + + break; + case OBJECT: + case NUMBER: + case INT_OBJ: + case LONG_OBJ: + case FLOAT_OBJ: + case DOUBLE_OBJ: + return checkTransform(source, cast); + case BYTE_OBJ: + case SHORT_OBJ: + case CHAR_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case LONG: + switch (to.sort) { + case FLOAT: + case DOUBLE: + return cast; + case BYTE: + case SHORT: + case CHAR: + case INT: + if (explicit) + return cast; + + break; + case OBJECT: + case NUMBER: + case LONG_OBJ: + case FLOAT_OBJ: + case DOUBLE_OBJ: + return checkTransform(source, cast); + case BYTE_OBJ: + case SHORT_OBJ: + case CHAR_OBJ: + case INT_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case FLOAT: + switch (to.sort) { + case DOUBLE: + return cast; + case BYTE: + case SHORT: + case CHAR: + case INT: + case LONG: + if (explicit) + return cast; + + break; + case OBJECT: + case NUMBER: + case FLOAT_OBJ: + case DOUBLE_OBJ: + return checkTransform(source, cast); + case BYTE_OBJ: + case SHORT_OBJ: + case CHAR_OBJ: + case INT_OBJ: + case LONG_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case DOUBLE: + switch (to.sort) { + case BYTE: + case SHORT: + case CHAR: + case INT: + case LONG: + case FLOAT: + if (explicit) + return cast; + + break; + case OBJECT: + case NUMBER: + case DOUBLE_OBJ: + return checkTransform(source, cast); + case BYTE_OBJ: + case SHORT_OBJ: + case CHAR_OBJ: + case INT_OBJ: + case LONG_OBJ: + case FLOAT_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case OBJECT: + case NUMBER: + switch (to.sort) { + case BYTE: + case SHORT: + case CHAR: + case INT: + case LONG: + case FLOAT: + case DOUBLE: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case BOOL_OBJ: + switch (to.sort) { + case BOOL: + return checkTransform(source, cast); + } + + break; + case BYTE_OBJ: + switch (to.sort) { + case BYTE: + case SHORT: + case INT: + case LONG: + case FLOAT: + case DOUBLE: + case SHORT_OBJ: + case INT_OBJ: + case LONG_OBJ: + case FLOAT_OBJ: + case DOUBLE_OBJ: + return checkTransform(source, cast); + case CHAR: + case CHAR_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case SHORT_OBJ: + switch (to.sort) { + case SHORT: + case INT: + case LONG: + case FLOAT: + case DOUBLE: + case INT_OBJ: + case LONG_OBJ: + case FLOAT_OBJ: + case DOUBLE_OBJ: + return checkTransform(source, cast); + case BYTE: + case CHAR: + case BYTE_OBJ: + case CHAR_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case CHAR_OBJ: + switch (to.sort) { + case CHAR: + case INT: 
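+ // A Character unboxes to char and then widens implicitly to the remaining numeric types;
+ // narrowing back to the byte/short variants further down requires an explicit cast.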
+ case LONG: + case FLOAT: + case DOUBLE: + case INT_OBJ: + case LONG_OBJ: + case FLOAT_OBJ: + case DOUBLE_OBJ: + return checkTransform(source, cast); + case BYTE: + case SHORT: + case BYTE_OBJ: + case SHORT_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case INT_OBJ: + switch (to.sort) { + case INT: + case LONG: + case FLOAT: + case DOUBLE: + case LONG_OBJ: + case FLOAT_OBJ: + case DOUBLE_OBJ: + return checkTransform(source, cast); + case BYTE: + case SHORT: + case CHAR: + case BYTE_OBJ: + case SHORT_OBJ: + case CHAR_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case LONG_OBJ: + switch (to.sort) { + case LONG: + case FLOAT: + case DOUBLE: + case FLOAT_OBJ: + case DOUBLE_OBJ: + return checkTransform(source, cast); + case BYTE: + case SHORT: + case CHAR: + case INT: + case BYTE_OBJ: + case SHORT_OBJ: + case CHAR_OBJ: + case INT_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case FLOAT_OBJ: + switch (to.sort) { + case FLOAT: + case DOUBLE: + case DOUBLE_OBJ: + return checkTransform(source, cast); + case BYTE: + case SHORT: + case CHAR: + case INT: + case LONG: + case BYTE_OBJ: + case SHORT_OBJ: + case CHAR_OBJ: + case INT_OBJ: + case LONG_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + case DOUBLE_OBJ: + switch (to.sort) { + case DOUBLE: + return checkTransform(source, cast); + case BYTE: + case SHORT: + case CHAR: + case INT: + case LONG: + case FLOAT: + case BYTE_OBJ: + case SHORT_OBJ: + case CHAR_OBJ: + case INT_OBJ: + case LONG_OBJ: + case FLOAT_OBJ: + if (explicit) + return checkTransform(source, cast); + + break; + } + + break; + } + + try { + from.clazz.asSubclass(to.clazz); + + return cast; + } catch (final ClassCastException cce0) { + try { + if (explicit) { + to.clazz.asSubclass(from.clazz); + + return cast; + } else { + throw new ClassCastException( + AnalyzerUtility.error(source) + "Cannot cast from [" + from.name + "] to [" + to.name + "]."); + } + } catch (final ClassCastException cce1) { + throw new ClassCastException( + AnalyzerUtility.error(source) + "Cannot cast from [" + from.name + "] to [" + to.name + "]."); + } + } + } + + private Transform checkTransform(final ParserRuleContext source, final Cast cast) { + final Transform transform = definition.transforms.get(cast); + + if (transform == null) { + throw new ClassCastException( + AnalyzerUtility.error(source) + "Cannot cast from [" + cast.from.name + "] to [" + cast.to.name + "]."); + } + + return transform; + } + + private Object constCast(final ParserRuleContext source, final Object constant, final Cast cast) { + if (cast instanceof Transform) { + final Transform transform = (Transform)cast; + return invokeTransform(source, transform, constant); + } else { + final Sort fsort = cast.from.sort; + final Sort tsort = cast.to.sort; + + if (fsort == tsort) { + return constant; + } else if (fsort.numeric && tsort.numeric) { + Number number; + + if (fsort == Sort.CHAR) { + number = (int)(char)constant; + } else { + number = (Number)constant; + } + + switch (tsort) { + case BYTE: return number.byteValue(); + case SHORT: return number.shortValue(); + case CHAR: return (char)number.intValue(); + case INT: return number.intValue(); + case LONG: return number.longValue(); + case FLOAT: return number.floatValue(); + case DOUBLE: return number.doubleValue(); + default: + throw new IllegalStateException(AnalyzerUtility.error(source) + "Expected numeric type for cast."); + } 
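+ // Numeric constant folding funnels through java.lang.Number above (char via its int value);
+ // any other combination is rejected below as an invalid compile-time constant cast.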
+ } else { + throw new IllegalStateException(AnalyzerUtility.error(source) + "No valid constant cast from " + + "[" + cast.from.clazz.getCanonicalName() + "] to " + + "[" + cast.to.clazz.getCanonicalName() + "]."); + } + } + } + + private Object invokeTransform(final ParserRuleContext source, final Transform transform, final Object object) { + final Method method = transform.method; + final java.lang.reflect.Method jmethod = method.reflect; + final int modifiers = jmethod.getModifiers(); + + try { + if (java.lang.reflect.Modifier.isStatic(modifiers)) { + return jmethod.invoke(null, object); + } else { + return jmethod.invoke(object); + } + } catch (IllegalAccessException | IllegalArgumentException | + java.lang.reflect.InvocationTargetException | NullPointerException | + ExceptionInInitializerError exception) { + throw new IllegalStateException(AnalyzerUtility.error(source) + "Unable to invoke transform to cast constant from " + + "[" + transform.from.name + "] to [" + transform.to.name + "]."); + } + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerExpression.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerExpression.java new file mode 100644 index 00000000000..3e74259fecf --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerExpression.java @@ -0,0 +1,868 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless; + +import org.elasticsearch.painless.Definition.Sort; +import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.Metadata.ExpressionMetadata; +import org.elasticsearch.painless.Metadata.ExternalMetadata; +import org.elasticsearch.painless.PainlessParser.AssignmentContext; +import org.elasticsearch.painless.PainlessParser.BinaryContext; +import org.elasticsearch.painless.PainlessParser.BoolContext; +import org.elasticsearch.painless.PainlessParser.CastContext; +import org.elasticsearch.painless.PainlessParser.CharContext; +import org.elasticsearch.painless.PainlessParser.CompContext; +import org.elasticsearch.painless.PainlessParser.ConditionalContext; +import org.elasticsearch.painless.PainlessParser.DecltypeContext; +import org.elasticsearch.painless.PainlessParser.ExpressionContext; +import org.elasticsearch.painless.PainlessParser.ExternalContext; +import org.elasticsearch.painless.PainlessParser.ExtstartContext; +import org.elasticsearch.painless.PainlessParser.FalseContext; +import org.elasticsearch.painless.PainlessParser.IncrementContext; +import org.elasticsearch.painless.PainlessParser.NullContext; +import org.elasticsearch.painless.PainlessParser.NumericContext; +import org.elasticsearch.painless.PainlessParser.PostincContext; +import org.elasticsearch.painless.PainlessParser.PreincContext; +import org.elasticsearch.painless.PainlessParser.TrueContext; +import org.elasticsearch.painless.PainlessParser.UnaryContext; + +import static org.elasticsearch.painless.PainlessParser.ADD; +import static org.elasticsearch.painless.PainlessParser.BWAND; +import static org.elasticsearch.painless.PainlessParser.BWOR; +import static org.elasticsearch.painless.PainlessParser.BWXOR; +import static org.elasticsearch.painless.PainlessParser.DIV; +import static org.elasticsearch.painless.PainlessParser.LSH; +import static org.elasticsearch.painless.PainlessParser.MUL; +import static org.elasticsearch.painless.PainlessParser.REM; +import static org.elasticsearch.painless.PainlessParser.RSH; +import static org.elasticsearch.painless.PainlessParser.SUB; +import static org.elasticsearch.painless.PainlessParser.USH; + +class AnalyzerExpression { + private final Metadata metadata; + private final Definition definition; + private final CompilerSettings settings; + + private final Analyzer analyzer; + private final AnalyzerCaster caster; + private final AnalyzerPromoter promoter; + + AnalyzerExpression(final Metadata metadata, final Analyzer analyzer, + final AnalyzerCaster caster, final AnalyzerPromoter promoter) { + this.metadata = metadata; + this.definition = metadata.definition; + this.settings = metadata.settings; + + this.analyzer = analyzer; + this.caster = caster; + this.promoter = promoter; + } + + void processNumeric(final NumericContext ctx) { + final ExpressionMetadata numericemd = metadata.getExpressionMetadata(ctx); + final boolean negate = ctx.parent instanceof UnaryContext && ((UnaryContext)ctx.parent).SUB() != null; + + if (ctx.DECIMAL() != null) { + final String svalue = (negate ? 
"-" : "") + ctx.DECIMAL().getText(); + + if (svalue.endsWith("f") || svalue.endsWith("F")) { + try { + numericemd.from = definition.floatType; + numericemd.preConst = Float.parseFloat(svalue.substring(0, svalue.length() - 1)); + } catch (NumberFormatException exception) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Invalid float constant [" + svalue + "]."); + } + } else { + try { + numericemd.from = definition.doubleType; + numericemd.preConst = Double.parseDouble(svalue); + } catch (NumberFormatException exception) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Invalid double constant [" + svalue + "]."); + } + } + } else { + String svalue = negate ? "-" : ""; + int radix; + + if (ctx.OCTAL() != null) { + svalue += ctx.OCTAL().getText(); + radix = 8; + } else if (ctx.INTEGER() != null) { + svalue += ctx.INTEGER().getText(); + radix = 10; + } else if (ctx.HEX() != null) { + svalue += ctx.HEX().getText(); + radix = 16; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + + if (svalue.endsWith("d") || svalue.endsWith("D")) { + try { + numericemd.from = definition.doubleType; + numericemd.preConst = Double.parseDouble(svalue.substring(0, svalue.length() - 1)); + } catch (NumberFormatException exception) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Invalid float constant [" + svalue + "]."); + } + } else if (svalue.endsWith("f") || svalue.endsWith("F")) { + try { + numericemd.from = definition.floatType; + numericemd.preConst = Float.parseFloat(svalue.substring(0, svalue.length() - 1)); + } catch (NumberFormatException exception) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Invalid float constant [" + svalue + "]."); + } + } else if (svalue.endsWith("l") || svalue.endsWith("L")) { + try { + numericemd.from = definition.longType; + numericemd.preConst = Long.parseLong(svalue.substring(0, svalue.length() - 1), radix); + } catch (NumberFormatException exception) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Invalid long constant [" + svalue + "]."); + } + } else { + try { + final Type type = numericemd.to; + final Sort sort = type == null ? 
Sort.INT : type.sort; + final int value = Integer.parseInt(svalue, radix); + + if (sort == Sort.BYTE && value >= Byte.MIN_VALUE && value <= Byte.MAX_VALUE) { + numericemd.from = definition.byteType; + numericemd.preConst = (byte)value; + } else if (sort == Sort.CHAR && value >= Character.MIN_VALUE && value <= Character.MAX_VALUE) { + numericemd.from = definition.charType; + numericemd.preConst = (char)value; + } else if (sort == Sort.SHORT && value >= Short.MIN_VALUE && value <= Short.MAX_VALUE) { + numericemd.from = definition.shortType; + numericemd.preConst = (short)value; + } else { + numericemd.from = definition.intType; + numericemd.preConst = value; + } + } catch (NumberFormatException exception) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Invalid int constant [" + svalue + "]."); + } + } + } + } + + void processChar(final CharContext ctx) { + final ExpressionMetadata charemd = metadata.getExpressionMetadata(ctx); + + if (ctx.CHAR() == null) { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + + charemd.preConst = ctx.CHAR().getText().charAt(0); + charemd.from = definition.charType; + } + + void processTrue(final TrueContext ctx) { + final ExpressionMetadata trueemd = metadata.getExpressionMetadata(ctx); + + if (ctx.TRUE() == null) { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + + trueemd.preConst = true; + trueemd.from = definition.booleanType; + } + + void processFalse(final FalseContext ctx) { + final ExpressionMetadata falseemd = metadata.getExpressionMetadata(ctx); + + if (ctx.FALSE() == null) { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + + falseemd.preConst = false; + falseemd.from = definition.booleanType; + } + + void processNull(final NullContext ctx) { + final ExpressionMetadata nullemd = metadata.getExpressionMetadata(ctx); + + if (ctx.NULL() == null) { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + + nullemd.isNull = true; + + if (nullemd.to != null) { + if (nullemd.to.sort.primitive) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + + "Cannot cast null to a primitive type [" + nullemd.to.name + "]."); + } + + nullemd.from = nullemd.to; + } else { + nullemd.from = definition.objectType; + } + } + + void processExternal(final ExternalContext ctx) { + final ExpressionMetadata extemd = metadata.getExpressionMetadata(ctx); + + final ExtstartContext extstartctx = ctx.extstart(); + final ExternalMetadata extstartemd = metadata.createExternalMetadata(extstartctx); + extstartemd.read = extemd.read; + analyzer.visit(extstartctx); + + extemd.statement = extstartemd.statement; + extemd.preConst = extstartemd.constant; + extemd.from = extstartemd.current; + extemd.typesafe = extstartemd.current.sort != Sort.DEF; + } + + void processPostinc(final PostincContext ctx) { + final ExpressionMetadata postincemd = metadata.getExpressionMetadata(ctx); + + final ExtstartContext extstartctx = ctx.extstart(); + final ExternalMetadata extstartemd = metadata.createExternalMetadata(extstartctx); + extstartemd.read = postincemd.read; + extstartemd.storeExpr = ctx.increment(); + extstartemd.token = ADD; + extstartemd.post = true; + analyzer.visit(extstartctx); + + postincemd.statement = true; + postincemd.from = extstartemd.read ? 
extstartemd.current : definition.voidType; + postincemd.typesafe = extstartemd.current.sort != Sort.DEF; + } + + void processPreinc(final PreincContext ctx) { + final ExpressionMetadata preincemd = metadata.getExpressionMetadata(ctx); + + final ExtstartContext extstartctx = ctx.extstart(); + final ExternalMetadata extstartemd = metadata.createExternalMetadata(extstartctx); + extstartemd.read = preincemd.read; + extstartemd.storeExpr = ctx.increment(); + extstartemd.token = ADD; + extstartemd.pre = true; + analyzer.visit(extstartctx); + + preincemd.statement = true; + preincemd.from = extstartemd.read ? extstartemd.current : definition.voidType; + preincemd.typesafe = extstartemd.current.sort != Sort.DEF; + } + + void processUnary(final UnaryContext ctx) { + final ExpressionMetadata unaryemd = metadata.getExpressionMetadata(ctx); + + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression()); + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + + if (ctx.BOOLNOT() != null) { + expremd.to = definition.booleanType; + analyzer.visit(exprctx); + caster.markCast(expremd); + + if (expremd.postConst != null) { + unaryemd.preConst = !(boolean)expremd.postConst; + } + + unaryemd.from = definition.booleanType; + } else if (ctx.BWNOT() != null || ctx.ADD() != null || ctx.SUB() != null) { + analyzer.visit(exprctx); + + final Type promote = promoter.promoteNumeric(expremd.from, ctx.BWNOT() == null, true); + + if (promote == null) { + throw new ClassCastException(AnalyzerUtility.error(ctx) + "Cannot apply [" + ctx.getChild(0).getText() + "] " + + "operation to type [" + expremd.from.name + "]."); + } + + expremd.to = promote; + caster.markCast(expremd); + + if (expremd.postConst != null) { + final Sort sort = promote.sort; + + if (ctx.BWNOT() != null) { + if (sort == Sort.INT) { + unaryemd.preConst = ~(int)expremd.postConst; + } else if (sort == Sort.LONG) { + unaryemd.preConst = ~(long)expremd.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.SUB() != null) { + if (exprctx instanceof NumericContext) { + unaryemd.preConst = expremd.postConst; + } else { + if (sort == Sort.INT) { + if (settings.getNumericOverflow()) { + unaryemd.preConst = -(int)expremd.postConst; + } else { + unaryemd.preConst = Math.negateExact((int)expremd.postConst); + } + } else if (sort == Sort.LONG) { + if (settings.getNumericOverflow()) { + unaryemd.preConst = -(long)expremd.postConst; + } else { + unaryemd.preConst = Math.negateExact((long)expremd.postConst); + } + } else if (sort == Sort.FLOAT) { + unaryemd.preConst = -(float)expremd.postConst; + } else if (sort == Sort.DOUBLE) { + unaryemd.preConst = -(double)expremd.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } + } else if (ctx.ADD() != null) { + if (sort == Sort.INT) { + unaryemd.preConst = +(int)expremd.postConst; + } else if (sort == Sort.LONG) { + unaryemd.preConst = +(long)expremd.postConst; + } else if (sort == Sort.FLOAT) { + unaryemd.preConst = +(float)expremd.postConst; + } else if (sort == Sort.DOUBLE) { + unaryemd.preConst = +(double)expremd.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } + + unaryemd.from = promote; + unaryemd.typesafe = expremd.typesafe; + } else { + throw new 
IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } + + void processCast(final CastContext ctx) { + final ExpressionMetadata castemd = metadata.getExpressionMetadata(ctx); + + final DecltypeContext decltypectx = ctx.decltype(); + final ExpressionMetadata decltypemd = metadata.createExpressionMetadata(decltypectx); + analyzer.visit(decltypectx); + + final Type type = decltypemd.from; + castemd.from = type; + + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression()); + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + expremd.to = type; + expremd.explicit = true; + analyzer.visit(exprctx); + caster.markCast(expremd); + + if (expremd.postConst != null) { + castemd.preConst = expremd.postConst; + } + + castemd.typesafe = expremd.typesafe && castemd.from.sort != Sort.DEF; + } + + void processBinary(final BinaryContext ctx) { + final ExpressionMetadata binaryemd = metadata.getExpressionMetadata(ctx); + + final ExpressionContext exprctx0 = AnalyzerUtility.updateExpressionTree(ctx.expression(0)); + final ExpressionMetadata expremd0 = metadata.createExpressionMetadata(exprctx0); + analyzer.visit(exprctx0); + + final ExpressionContext exprctx1 = AnalyzerUtility.updateExpressionTree(ctx.expression(1)); + final ExpressionMetadata expremd1 = metadata.createExpressionMetadata(exprctx1); + analyzer.visit(exprctx1); + + final boolean decimal = ctx.MUL() != null || ctx.DIV() != null || ctx.REM() != null || ctx.SUB() != null; + final boolean add = ctx.ADD() != null; + final boolean xor = ctx.BWXOR() != null; + final Type promote = add ? promoter.promoteAdd(expremd0.from, expremd1.from) : + xor ? promoter.promoteXor(expremd0.from, expremd1.from) : + promoter.promoteNumeric(expremd0.from, expremd1.from, decimal, true); + + if (promote == null) { + throw new ClassCastException(AnalyzerUtility.error(ctx) + "Cannot apply [" + ctx.getChild(1).getText() + "] " + + "operation to types [" + expremd0.from.name + "] and [" + expremd1.from.name + "]."); + } + + final Sort sort = promote.sort; + expremd0.to = add && sort == Sort.STRING ? expremd0.from : promote; + expremd1.to = add && sort == Sort.STRING ? 
expremd1.from : promote; + caster.markCast(expremd0); + caster.markCast(expremd1); + + if (expremd0.postConst != null && expremd1.postConst != null) { + if (ctx.MUL() != null) { + if (sort == Sort.INT) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (int)expremd0.postConst * (int)expremd1.postConst; + } else { + binaryemd.preConst = Math.multiplyExact((int)expremd0.postConst, (int)expremd1.postConst); + } + } else if (sort == Sort.LONG) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (long)expremd0.postConst * (long)expremd1.postConst; + } else { + binaryemd.preConst = Math.multiplyExact((long)expremd0.postConst, (long)expremd1.postConst); + } + } else if (sort == Sort.FLOAT) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (float)expremd0.postConst * (float)expremd1.postConst; + } else { + binaryemd.preConst = Utility.multiplyWithoutOverflow((float)expremd0.postConst, (float)expremd1.postConst); + } + } else if (sort == Sort.DOUBLE) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (double)expremd0.postConst * (double)expremd1.postConst; + } else { + binaryemd.preConst = Utility.multiplyWithoutOverflow((double)expremd0.postConst, (double)expremd1.postConst); + } + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.DIV() != null) { + if (sort == Sort.INT) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (int)expremd0.postConst / (int)expremd1.postConst; + } else { + binaryemd.preConst = Utility.divideWithoutOverflow((int)expremd0.postConst, (int)expremd1.postConst); + } + } else if (sort == Sort.LONG) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (long)expremd0.postConst / (long)expremd1.postConst; + } else { + binaryemd.preConst = Utility.divideWithoutOverflow((long)expremd0.postConst, (long)expremd1.postConst); + } + } else if (sort == Sort.FLOAT) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (float)expremd0.postConst / (float)expremd1.postConst; + } else { + binaryemd.preConst = Utility.divideWithoutOverflow((float)expremd0.postConst, (float)expremd1.postConst); + } + } else if (sort == Sort.DOUBLE) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (double)expremd0.postConst / (double)expremd1.postConst; + } else { + binaryemd.preConst = Utility.divideWithoutOverflow((double)expremd0.postConst, (double)expremd1.postConst); + } + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.REM() != null) { + if (sort == Sort.INT) { + binaryemd.preConst = (int)expremd0.postConst % (int)expremd1.postConst; + } else if (sort == Sort.LONG) { + binaryemd.preConst = (long)expremd0.postConst % (long)expremd1.postConst; + } else if (sort == Sort.FLOAT) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (float)expremd0.postConst % (float)expremd1.postConst; + } else { + binaryemd.preConst = Utility.remainderWithoutOverflow((float)expremd0.postConst, (float)expremd1.postConst); + } + } else if (sort == Sort.DOUBLE) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (double)expremd0.postConst % (double)expremd1.postConst; + } else { + binaryemd.preConst = Utility.remainderWithoutOverflow((double)expremd0.postConst, (double)expremd1.postConst); + } + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.ADD() != null) { + if (sort == Sort.INT) { + if 
(settings.getNumericOverflow()) { + binaryemd.preConst = (int)expremd0.postConst + (int)expremd1.postConst; + } else { + binaryemd.preConst = Math.addExact((int)expremd0.postConst, (int)expremd1.postConst); + } + } else if (sort == Sort.LONG) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (long)expremd0.postConst + (long)expremd1.postConst; + } else { + binaryemd.preConst = Math.addExact((long)expremd0.postConst, (long)expremd1.postConst); + } + } else if (sort == Sort.FLOAT) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (float)expremd0.postConst + (float)expremd1.postConst; + } else { + binaryemd.preConst = Utility.addWithoutOverflow((float)expremd0.postConst, (float)expremd1.postConst); + } + } else if (sort == Sort.DOUBLE) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (double)expremd0.postConst + (double)expremd1.postConst; + } else { + binaryemd.preConst = Utility.addWithoutOverflow((double)expremd0.postConst, (double)expremd1.postConst); + } + } else if (sort == Sort.STRING) { + binaryemd.preConst = "" + expremd0.postConst + expremd1.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.SUB() != null) { + if (sort == Sort.INT) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (int)expremd0.postConst - (int)expremd1.postConst; + } else { + binaryemd.preConst = Math.subtractExact((int)expremd0.postConst, (int)expremd1.postConst); + } + } else if (sort == Sort.LONG) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (long)expremd0.postConst - (long)expremd1.postConst; + } else { + binaryemd.preConst = Math.subtractExact((long)expremd0.postConst, (long)expremd1.postConst); + } + } else if (sort == Sort.FLOAT) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (float)expremd0.postConst - (float)expremd1.postConst; + } else { + binaryemd.preConst = Utility.subtractWithoutOverflow((float)expremd0.postConst, (float)expremd1.postConst); + } + } else if (sort == Sort.DOUBLE) { + if (settings.getNumericOverflow()) { + binaryemd.preConst = (double)expremd0.postConst - (double)expremd1.postConst; + } else { + binaryemd.preConst = Utility.subtractWithoutOverflow((double)expremd0.postConst, (double)expremd1.postConst); + } + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.LSH() != null) { + if (sort == Sort.INT) { + binaryemd.preConst = (int)expremd0.postConst << (int)expremd1.postConst; + } else if (sort == Sort.LONG) { + binaryemd.preConst = (long)expremd0.postConst << (long)expremd1.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.RSH() != null) { + if (sort == Sort.INT) { + binaryemd.preConst = (int)expremd0.postConst >> (int)expremd1.postConst; + } else if (sort == Sort.LONG) { + binaryemd.preConst = (long)expremd0.postConst >> (long)expremd1.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.USH() != null) { + if (sort == Sort.INT) { + binaryemd.preConst = (int)expremd0.postConst >>> (int)expremd1.postConst; + } else if (sort == Sort.LONG) { + binaryemd.preConst = (long)expremd0.postConst >>> (long)expremd1.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.BWAND() != null) { + if (sort == Sort.INT) { + binaryemd.preConst = 
(int)expremd0.postConst & (int)expremd1.postConst; + } else if (sort == Sort.LONG) { + binaryemd.preConst = (long)expremd0.postConst & (long)expremd1.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.BWXOR() != null) { + if (sort == Sort.BOOL) { + binaryemd.preConst = (boolean)expremd0.postConst ^ (boolean)expremd1.postConst; + } else if (sort == Sort.INT) { + binaryemd.preConst = (int)expremd0.postConst ^ (int)expremd1.postConst; + } else if (sort == Sort.LONG) { + binaryemd.preConst = (long)expremd0.postConst ^ (long)expremd1.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.BWOR() != null) { + if (sort == Sort.INT) { + binaryemd.preConst = (int)expremd0.postConst | (int)expremd1.postConst; + } else if (sort == Sort.LONG) { + binaryemd.preConst = (long)expremd0.postConst | (long)expremd1.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } + + binaryemd.from = promote; + binaryemd.typesafe = expremd0.typesafe && expremd1.typesafe; + } + + void processComp(final CompContext ctx) { + final ExpressionMetadata compemd = metadata.getExpressionMetadata(ctx); + final boolean equality = ctx.EQ() != null || ctx.NE() != null; + final boolean reference = ctx.EQR() != null || ctx.NER() != null; + + final ExpressionContext exprctx0 = AnalyzerUtility.updateExpressionTree(ctx.expression(0)); + final ExpressionMetadata expremd0 = metadata.createExpressionMetadata(exprctx0); + analyzer.visit(exprctx0); + + final ExpressionContext exprctx1 = AnalyzerUtility.updateExpressionTree(ctx.expression(1)); + final ExpressionMetadata expremd1 = metadata.createExpressionMetadata(exprctx1); + analyzer.visit(exprctx1); + + if (expremd0.isNull && expremd1.isNull) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unnecessary comparison of null constants."); + } + + final Type promote = equality ? promoter.promoteEquality(expremd0.from, expremd1.from) : + reference ? 
promoter.promoteReference(expremd0.from, expremd1.from) : + promoter.promoteNumeric(expremd0.from, expremd1.from, true, true); + + if (promote == null) { + throw new ClassCastException(AnalyzerUtility.error(ctx) + "Cannot apply [" + ctx.getChild(1).getText() + "] " + + "operation to types [" + expremd0.from.name + "] and [" + expremd1.from.name + "]."); + } + + expremd0.to = promote; + expremd1.to = promote; + caster.markCast(expremd0); + caster.markCast(expremd1); + + if (expremd0.postConst != null && expremd1.postConst != null) { + final Sort sort = promote.sort; + + if (ctx.EQ() != null || ctx.EQR() != null) { + if (sort == Sort.BOOL) { + compemd.preConst = (boolean)expremd0.postConst == (boolean)expremd1.postConst; + } else if (sort == Sort.INT) { + compemd.preConst = (int)expremd0.postConst == (int)expremd1.postConst; + } else if (sort == Sort.LONG) { + compemd.preConst = (long)expremd0.postConst == (long)expremd1.postConst; + } else if (sort == Sort.FLOAT) { + compemd.preConst = (float)expremd0.postConst == (float)expremd1.postConst; + } else if (sort == Sort.DOUBLE) { + compemd.preConst = (double)expremd0.postConst == (double)expremd1.postConst; + } else { + if (ctx.EQ() != null && !expremd0.isNull && !expremd1.isNull) { + compemd.preConst = expremd0.postConst.equals(expremd1.postConst); + } else if (ctx.EQR() != null) { + compemd.preConst = expremd0.postConst == expremd1.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } + } else if (ctx.NE() != null || ctx.NER() != null) { + if (sort == Sort.BOOL) { + compemd.preConst = (boolean)expremd0.postConst != (boolean)expremd1.postConst; + } else if (sort == Sort.INT) { + compemd.preConst = (int)expremd0.postConst != (int)expremd1.postConst; + } else if (sort == Sort.LONG) { + compemd.preConst = (long)expremd0.postConst != (long)expremd1.postConst; + } else if (sort == Sort.FLOAT) { + compemd.preConst = (float)expremd0.postConst != (float)expremd1.postConst; + } else if (sort == Sort.DOUBLE) { + compemd.preConst = (double)expremd0.postConst != (double)expremd1.postConst; + } else { + if (ctx.NE() != null && !expremd0.isNull && !expremd1.isNull) { + compemd.preConst = !expremd0.postConst.equals(expremd1.postConst); + } else if (ctx.NER() != null) { + compemd.preConst = expremd0.postConst != expremd1.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } + } else if (ctx.GTE() != null) { + if (sort == Sort.INT) { + compemd.preConst = (int)expremd0.postConst >= (int)expremd1.postConst; + } else if (sort == Sort.LONG) { + compemd.preConst = (long)expremd0.postConst >= (long)expremd1.postConst; + } else if (sort == Sort.FLOAT) { + compemd.preConst = (float)expremd0.postConst >= (float)expremd1.postConst; + } else if (sort == Sort.DOUBLE) { + compemd.preConst = (double)expremd0.postConst >= (double)expremd1.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.GT() != null) { + if (sort == Sort.INT) { + compemd.preConst = (int)expremd0.postConst > (int)expremd1.postConst; + } else if (sort == Sort.LONG) { + compemd.preConst = (long)expremd0.postConst > (long)expremd1.postConst; + } else if (sort == Sort.FLOAT) { + compemd.preConst = (float)expremd0.postConst > (float)expremd1.postConst; + } else if (sort == Sort.DOUBLE) { + compemd.preConst = (double)expremd0.postConst > (double)expremd1.postConst; + } else {
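+ // Every comparable promotion is handled above, so falling through here indicates an analyzer bug.
+ throw new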
IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.LTE() != null) { + if (sort == Sort.INT) { + compemd.preConst = (int)expremd0.postConst <= (int)expremd1.postConst; + } else if (sort == Sort.LONG) { + compemd.preConst = (long)expremd0.postConst <= (long)expremd1.postConst; + } else if (sort == Sort.FLOAT) { + compemd.preConst = (float)expremd0.postConst <= (float)expremd1.postConst; + } else if (sort == Sort.DOUBLE) { + compemd.preConst = (double)expremd0.postConst <= (double)expremd1.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else if (ctx.LT() != null) { + if (sort == Sort.INT) { + compemd.preConst = (int)expremd0.postConst < (int)expremd1.postConst; + } else if (sort == Sort.LONG) { + compemd.preConst = (long)expremd0.postConst < (long)expremd1.postConst; + } else if (sort == Sort.FLOAT) { + compemd.preConst = (float)expremd0.postConst < (float)expremd1.postConst; + } else if (sort == Sort.DOUBLE) { + compemd.preConst = (double)expremd0.postConst < (double)expremd1.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } + + compemd.from = definition.booleanType; + compemd.typesafe = expremd0.typesafe && expremd1.typesafe; + } + + void processBool(final BoolContext ctx) { + final ExpressionMetadata boolemd = metadata.getExpressionMetadata(ctx); + + final ExpressionContext exprctx0 = AnalyzerUtility.updateExpressionTree(ctx.expression(0)); + final ExpressionMetadata expremd0 = metadata.createExpressionMetadata(exprctx0); + expremd0.to = definition.booleanType; + analyzer.visit(exprctx0); + caster.markCast(expremd0); + + final ExpressionContext exprctx1 = AnalyzerUtility.updateExpressionTree(ctx.expression(1)); + final ExpressionMetadata expremd1 = metadata.createExpressionMetadata(exprctx1); + expremd1.to = definition.booleanType; + analyzer.visit(exprctx1); + caster.markCast(expremd1); + + if (expremd0.postConst != null && expremd1.postConst != null) { + if (ctx.BOOLAND() != null) { + boolemd.preConst = (boolean)expremd0.postConst && (boolean)expremd1.postConst; + } else if (ctx.BOOLOR() != null) { + boolemd.preConst = (boolean)expremd0.postConst || (boolean)expremd1.postConst; + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } + + boolemd.from = definition.booleanType; + boolemd.typesafe = expremd0.typesafe && expremd1.typesafe; + } + + void processConditional(final ConditionalContext ctx) { + final ExpressionMetadata condemd = metadata.getExpressionMetadata(ctx); + + final ExpressionContext exprctx0 = AnalyzerUtility.updateExpressionTree(ctx.expression(0)); + final ExpressionMetadata expremd0 = metadata.createExpressionMetadata(exprctx0); + expremd0.to = definition.booleanType; + analyzer.visit(exprctx0); + caster.markCast(expremd0); + + if (expremd0.postConst != null) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unnecessary conditional statement."); + } + + final ExpressionContext exprctx1 = AnalyzerUtility.updateExpressionTree(ctx.expression(1)); + final ExpressionMetadata expremd1 = metadata.createExpressionMetadata(exprctx1); + expremd1.to = condemd.to; + expremd1.explicit = condemd.explicit; + analyzer.visit(exprctx1); + + final ExpressionContext exprctx2 = AnalyzerUtility.updateExpressionTree(ctx.expression(2)); + final 
ExpressionMetadata expremd2 = metadata.createExpressionMetadata(exprctx2); + expremd2.to = condemd.to; + expremd2.explicit = condemd.explicit; + analyzer.visit(exprctx2); + + if (condemd.to == null) { + final Type promote = promoter.promoteConditional(expremd1.from, expremd2.from, expremd1.preConst, expremd2.preConst); + + expremd1.to = promote; + expremd2.to = promote; + condemd.from = promote; + } else { + condemd.from = condemd.to; + } + + caster.markCast(expremd1); + caster.markCast(expremd2); + + condemd.typesafe = expremd0.typesafe && expremd1.typesafe && expremd2.typesafe; + } + + void processAssignment(final AssignmentContext ctx) { + final ExpressionMetadata assignemd = metadata.getExpressionMetadata(ctx); + + final ExtstartContext extstartctx = ctx.extstart(); + final ExternalMetadata extstartemd = metadata.createExternalMetadata(extstartctx); + + extstartemd.read = assignemd.read; + extstartemd.storeExpr = AnalyzerUtility.updateExpressionTree(ctx.expression()); + + if (ctx.AMUL() != null) { + extstartemd.token = MUL; + } else if (ctx.ADIV() != null) { + extstartemd.token = DIV; + } else if (ctx.AREM() != null) { + extstartemd.token = REM; + } else if (ctx.AADD() != null) { + extstartemd.token = ADD; + } else if (ctx.ASUB() != null) { + extstartemd.token = SUB; + } else if (ctx.ALSH() != null) { + extstartemd.token = LSH; + } else if (ctx.AUSH() != null) { + extstartemd.token = USH; + } else if (ctx.ARSH() != null) { + extstartemd.token = RSH; + } else if (ctx.AAND() != null) { + extstartemd.token = BWAND; + } else if (ctx.AXOR() != null) { + extstartemd.token = BWXOR; + } else if (ctx.AOR() != null) { + extstartemd.token = BWOR; + } + + analyzer.visit(extstartctx); + + assignemd.statement = true; + assignemd.from = extstartemd.read ? extstartemd.current : definition.voidType; + assignemd.typesafe = extstartemd.current.sort != Sort.DEF; + } + + void processIncrement(final IncrementContext ctx) { + final ExpressionMetadata incremd = metadata.getExpressionMetadata(ctx); + final Sort sort = incremd.to == null ? null : incremd.to.sort; + final boolean positive = ctx.INCR() != null; + + if (incremd.to == null) { + incremd.preConst = positive ? 1 : -1; + incremd.from = definition.intType; + } else { + switch (sort) { + case LONG: + incremd.preConst = positive ? 1L : -1L; + incremd.from = definition.longType; + break; + case FLOAT: + incremd.preConst = positive ? 1.0F : -1.0F; + incremd.from = definition.floatType; + break; + case DOUBLE: + incremd.preConst = positive ? 1.0 : -1.0; + incremd.from = definition.doubleType; + break; + default: + incremd.preConst = positive ? 1 : -1; + incremd.from = definition.intType; + } + } + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerExternal.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerExternal.java new file mode 100644 index 00000000000..db3ab06e785 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerExternal.java @@ -0,0 +1,816 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.elasticsearch.painless.AnalyzerUtility.Variable; +import org.elasticsearch.painless.Definition.Constructor; +import org.elasticsearch.painless.Definition.Field; +import org.elasticsearch.painless.Definition.Method; +import org.elasticsearch.painless.Definition.Sort; +import org.elasticsearch.painless.Definition.Struct; +import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.Metadata.ExpressionMetadata; +import org.elasticsearch.painless.Metadata.ExtNodeMetadata; +import org.elasticsearch.painless.Metadata.ExternalMetadata; +import org.elasticsearch.painless.PainlessParser.DecltypeContext; +import org.elasticsearch.painless.PainlessParser.ExpressionContext; +import org.elasticsearch.painless.PainlessParser.ExtbraceContext; +import org.elasticsearch.painless.PainlessParser.ExtcallContext; +import org.elasticsearch.painless.PainlessParser.ExtcastContext; +import org.elasticsearch.painless.PainlessParser.ExtdotContext; +import org.elasticsearch.painless.PainlessParser.ExtfieldContext; +import org.elasticsearch.painless.PainlessParser.ExtnewContext; +import org.elasticsearch.painless.PainlessParser.ExtprecContext; +import org.elasticsearch.painless.PainlessParser.ExtstartContext; +import org.elasticsearch.painless.PainlessParser.ExtstringContext; +import org.elasticsearch.painless.PainlessParser.ExttypeContext; +import org.elasticsearch.painless.PainlessParser.ExtvarContext; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.painless.PainlessParser.ADD; +import static org.elasticsearch.painless.PainlessParser.BWAND; +import static org.elasticsearch.painless.PainlessParser.BWOR; +import static org.elasticsearch.painless.PainlessParser.BWXOR; +import static org.elasticsearch.painless.PainlessParser.DIV; +import static org.elasticsearch.painless.PainlessParser.MUL; +import static org.elasticsearch.painless.PainlessParser.REM; +import static org.elasticsearch.painless.PainlessParser.SUB; + +class AnalyzerExternal { + private final Metadata metadata; + private final Definition definition; + + private final Analyzer analyzer; + private final AnalyzerUtility utility; + private final AnalyzerCaster caster; + private final AnalyzerPromoter promoter; + + AnalyzerExternal(final Metadata metadata, final Analyzer analyzer, final AnalyzerUtility utility, + final AnalyzerCaster caster, final AnalyzerPromoter promoter) { + this.metadata = metadata; + this.definition = metadata.definition; + + this.analyzer = analyzer; + this.utility = utility; + this.caster = caster; + this.promoter = promoter; + } + + void processExtstart(final ExtstartContext ctx) { + final ExtprecContext precctx = ctx.extprec(); + final ExtcastContext castctx = ctx.extcast(); + final ExttypeContext typectx = ctx.exttype(); + final ExtvarContext varctx = ctx.extvar(); + final ExtnewContext newctx = ctx.extnew(); + final ExtstringContext stringctx = ctx.extstring(); + + if (precctx != null) { + metadata.createExtNodeMetadata(ctx, 
precctx); + analyzer.visit(precctx); + } else if (castctx != null) { + metadata.createExtNodeMetadata(ctx, castctx); + analyzer.visit(castctx); + } else if (typectx != null) { + metadata.createExtNodeMetadata(ctx, typectx); + analyzer.visit(typectx); + } else if (varctx != null) { + metadata.createExtNodeMetadata(ctx, varctx); + analyzer.visit(varctx); + } else if (newctx != null) { + metadata.createExtNodeMetadata(ctx, newctx); + analyzer.visit(newctx); + } else if (stringctx != null) { + metadata.createExtNodeMetadata(ctx, stringctx); + analyzer.visit(stringctx); + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } + + void processExtprec(final ExtprecContext ctx) { + final ExtNodeMetadata precenmd = metadata.getExtNodeMetadata(ctx); + final ParserRuleContext parent = precenmd.parent; + final ExternalMetadata parentemd = metadata.getExternalMetadata(parent); + + final ExtprecContext precctx = ctx.extprec(); + final ExtcastContext castctx = ctx.extcast(); + final ExttypeContext typectx = ctx.exttype(); + final ExtvarContext varctx = ctx.extvar(); + final ExtnewContext newctx = ctx.extnew(); + final ExtstringContext stringctx = ctx.extstring(); + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + if (dotctx != null || bracectx != null) { + ++parentemd.scope; + } + + if (precctx != null) { + metadata.createExtNodeMetadata(parent, precctx); + analyzer.visit(precctx); + } else if (castctx != null) { + metadata.createExtNodeMetadata(parent, castctx); + analyzer.visit(castctx); + } else if (typectx != null) { + metadata.createExtNodeMetadata(parent, typectx); + analyzer.visit(typectx); + } else if (varctx != null) { + metadata.createExtNodeMetadata(parent, varctx); + analyzer.visit(varctx); + } else if (newctx != null) { + metadata.createExtNodeMetadata(parent, newctx); + analyzer.visit(newctx); + } else if (stringctx != null) { + metadata.createExtNodeMetadata(ctx, stringctx); + analyzer.visit(stringctx); + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + + parentemd.statement = false; + + if (dotctx != null) { + --parentemd.scope; + + metadata.createExtNodeMetadata(parent, dotctx); + analyzer.visit(dotctx); + } else if (bracectx != null) { + --parentemd.scope; + + metadata.createExtNodeMetadata(parent, bracectx); + analyzer.visit(bracectx); + } + } + + void processExtcast(final ExtcastContext ctx) { + final ExtNodeMetadata castenmd = metadata.getExtNodeMetadata(ctx); + final ParserRuleContext parent = castenmd.parent; + final ExternalMetadata parentemd = metadata.getExternalMetadata(parent); + + final ExtprecContext precctx = ctx.extprec(); + final ExtcastContext castctx = ctx.extcast(); + final ExttypeContext typectx = ctx.exttype(); + final ExtvarContext varctx = ctx.extvar(); + final ExtnewContext newctx = ctx.extnew(); + final ExtstringContext stringctx = ctx.extstring(); + + if (precctx != null) { + metadata.createExtNodeMetadata(parent, precctx); + analyzer.visit(precctx); + } else if (castctx != null) { + metadata.createExtNodeMetadata(parent, castctx); + analyzer.visit(castctx); + } else if (typectx != null) { + metadata.createExtNodeMetadata(parent, typectx); + analyzer.visit(typectx); + } else if (varctx != null) { + metadata.createExtNodeMetadata(parent, varctx); + analyzer.visit(varctx); + } else if (newctx != null) { + metadata.createExtNodeMetadata(parent, newctx); + analyzer.visit(newctx); + } else if (stringctx != null) { + 
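// As in processExtprec above, the string constant is keyed on this context rather than the parent. +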
metadata.createExtNodeMetadata(ctx, stringctx); + analyzer.visit(stringctx); + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + + final DecltypeContext declctx = ctx.decltype(); + final ExpressionMetadata declemd = metadata.createExpressionMetadata(declctx); + analyzer.visit(declctx); + + castenmd.castTo = caster.getLegalCast(ctx, parentemd.current, declemd.from, true); + castenmd.type = declemd.from; + parentemd.current = declemd.from; + parentemd.statement = false; + } + + void processExtbrace(final ExtbraceContext ctx) { + final ExtNodeMetadata braceenmd = metadata.getExtNodeMetadata(ctx); + final ParserRuleContext parent = braceenmd.parent; + final ExternalMetadata parentemd = metadata.getExternalMetadata(parent); + + final boolean array = parentemd.current.sort == Sort.ARRAY; + final boolean def = parentemd.current.sort == Sort.DEF; + boolean map = false; + boolean list = false; + + try { + parentemd.current.clazz.asSubclass(Map.class); + map = true; + } catch (final ClassCastException exception) { + // Do nothing. + } + + try { + parentemd.current.clazz.asSubclass(List.class); + list = true; + } catch (final ClassCastException exception) { + // Do nothing. + } + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + braceenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null; + + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression()); + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + + if (array || def) { + expremd.to = array ? definition.intType : definition.objectType; + analyzer.visit(exprctx); + caster.markCast(expremd); + + braceenmd.target = "#brace"; + braceenmd.type = def ? definition.defType : + definition.getType(parentemd.current.struct, parentemd.current.type.getDimensions() - 1); + analyzeLoadStoreExternal(ctx); + parentemd.current = braceenmd.type; + + if (dotctx != null) { + metadata.createExtNodeMetadata(parent, dotctx); + analyzer.visit(dotctx); + } else if (bracectx != null) { + metadata.createExtNodeMetadata(parent, bracectx); + analyzer.visit(bracectx); + } + } else { + final boolean store = braceenmd.last && parentemd.storeExpr != null; + final boolean get = parentemd.read || parentemd.token > 0 || !braceenmd.last; + final boolean set = braceenmd.last && store; + + Method getter; + Method setter; + Type valuetype; + Type settype; + + if (map) { + getter = parentemd.current.struct.methods.get("get"); + setter = parentemd.current.struct.methods.get("put"); + + if (getter != null && (getter.rtn.sort == Sort.VOID || getter.arguments.size() != 1)) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + + "Illegal map get shortcut for type [" + parentemd.current.name + "]."); + } + + if (setter != null && setter.arguments.size() != 2) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + + "Illegal map set shortcut for type [" + parentemd.current.name + "]."); + } + + if (getter != null && setter != null && (!getter.arguments.get(0).equals(setter.arguments.get(0)) + || !getter.rtn.equals(setter.arguments.get(1)))) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Shortcut argument types must match."); + } + + valuetype = setter != null ? setter.arguments.get(0) : getter != null ? getter.arguments.get(0) : null; + settype = setter == null ? 
null : setter.arguments.get(1); + } else if (list) { + getter = parentemd.current.struct.methods.get("get"); + setter = parentemd.current.struct.methods.get("set"); + + if (getter != null && (getter.rtn.sort == Sort.VOID || getter.arguments.size() != 1 || + getter.arguments.get(0).sort != Sort.INT)) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + + "Illegal list get shortcut for type [" + parentemd.current.name + "]."); + } + + if (setter != null && (setter.arguments.size() != 2 || setter.arguments.get(0).sort != Sort.INT)) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + + "Illegal list set shortcut for type [" + parentemd.current.name + "]."); + } + + if (getter != null && setter != null && (!getter.arguments.get(0).equals(setter.arguments.get(0)) + || !getter.rtn.equals(setter.arguments.get(1)))) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Shortcut argument types must match."); + } + + valuetype = definition.intType; + settype = setter == null ? null : setter.arguments.get(1); + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + + if ((get || set) && (!get || getter != null) && (!set || setter != null)) { + expremd.to = valuetype; + analyzer.visit(exprctx); + caster.markCast(expremd); + + braceenmd.target = new Object[] {getter, setter, true, null}; + braceenmd.type = get ? getter.rtn : settype; + analyzeLoadStoreExternal(ctx); + parentemd.current = get ? getter.rtn : setter.rtn; + } + } + + if (braceenmd.target == null) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + + "Attempting to address a non-array type [" + parentemd.current.name + "] as an array."); + } + } + + void processExtdot(final ExtdotContext ctx) { + final ExtNodeMetadata dotemnd = metadata.getExtNodeMetadata(ctx); + final ParserRuleContext parent = dotemnd.parent; + + final ExtcallContext callctx = ctx.extcall(); + final ExtfieldContext fieldctx = ctx.extfield(); + + if (callctx != null) { + metadata.createExtNodeMetadata(parent, callctx); + analyzer.visit(callctx); + } else if (fieldctx != null) { + metadata.createExtNodeMetadata(parent, fieldctx); + analyzer.visit(fieldctx); + } + } + + void processExttype(final ExttypeContext ctx) { + final ExtNodeMetadata typeenmd = metadata.getExtNodeMetadata(ctx); + final ParserRuleContext parent = typeenmd.parent; + final ExternalMetadata parentemd = metadata.getExternalMetadata(parent); + + if (parentemd.current != null) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unexpected static type."); + } + + final String typestr = ctx.TYPE().getText(); + typeenmd.type = definition.getType(typestr); + parentemd.current = typeenmd.type; + parentemd.statik = true; + + final ExtdotContext dotctx = ctx.extdot(); + metadata.createExtNodeMetadata(parent, dotctx); + analyzer.visit(dotctx); + } + + void processExtcall(final ExtcallContext ctx) { + final ExtNodeMetadata callenmd = metadata.getExtNodeMetadata(ctx); + final ParserRuleContext parent = callenmd.parent; + final ExternalMetadata parentemd = metadata.getExternalMetadata(parent); + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + callenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null; + + final String name = ctx.EXTID().getText(); + + if (parentemd.current.sort == Sort.ARRAY) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unexpected call [" + name + "] on an array."); + } else if 
(callenmd.last && parentemd.storeExpr != null) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Cannot assign a value to a call [" + name + "]."); + } + + final Struct struct = parentemd.current.struct; + final List<ExpressionContext> arguments = ctx.arguments().expression(); + final int size = arguments.size(); + Type[] types; + + final Method method = parentemd.statik ? struct.functions.get(name) : struct.methods.get(name); + final boolean def = parentemd.current.sort == Sort.DEF; + + if (method == null && !def) { + throw new IllegalArgumentException( + AnalyzerUtility.error(ctx) + "Unknown call [" + name + "] on type [" + struct.name + "]."); + } else if (method != null) { + types = new Type[method.arguments.size()]; + method.arguments.toArray(types); + + callenmd.target = method; + callenmd.type = method.rtn; + parentemd.statement = !parentemd.read && callenmd.last; + parentemd.current = method.rtn; + + if (size != types.length) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "When calling [" + name + "] on type " + + "[" + struct.name + "] expected [" + types.length + "] arguments," + + " but found [" + arguments.size() + "]."); + } + } else { + types = new Type[arguments.size()]; + Arrays.fill(types, definition.defType); + + callenmd.target = name; + callenmd.type = definition.defType; + parentemd.statement = !parentemd.read && callenmd.last; + parentemd.current = callenmd.type; + } + + for (int argument = 0; argument < size; ++argument) { + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(arguments.get(argument)); + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + expremd.to = types[argument]; + analyzer.visit(exprctx); + caster.markCast(expremd); + } + + parentemd.statik = false; + + if (dotctx != null) { + metadata.createExtNodeMetadata(parent, dotctx); + analyzer.visit(dotctx); + } else if (bracectx != null) { + metadata.createExtNodeMetadata(parent, bracectx); + analyzer.visit(bracectx); + } + } + + void processExtvar(final ExtvarContext ctx) { + final ExtNodeMetadata varenmd = metadata.getExtNodeMetadata(ctx); + final ParserRuleContext parent = varenmd.parent; + final ExternalMetadata parentemd = metadata.getExternalMetadata(parent); + + final String name = ctx.ID().getText(); + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + if (parentemd.current != null) { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected variable [" + name + "] load."); + } + + varenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null; + + final Variable variable = utility.getVariable(name); + + if (variable == null) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unknown variable [" + name + "]."); + } + + varenmd.target = variable.slot; + varenmd.type = variable.type; + analyzeLoadStoreExternal(ctx); + parentemd.current = varenmd.type; + + if (dotctx != null) { + metadata.createExtNodeMetadata(parent, dotctx); + analyzer.visit(dotctx); + } else if (bracectx != null) { + metadata.createExtNodeMetadata(parent, bracectx); + analyzer.visit(bracectx); + } + } + + void processExtfield(final ExtfieldContext ctx) { + final ExtNodeMetadata memberenmd = metadata.getExtNodeMetadata(ctx); + final ParserRuleContext parent = memberenmd.parent; + final ExternalMetadata parentemd = metadata.getExternalMetadata(parent); + + if (ctx.EXTID() == null && ctx.EXTINTEGER() == null) { + throw new 
IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + + final String value = ctx.EXTID() == null ? ctx.EXTINTEGER().getText() : ctx.EXTID().getText(); + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + memberenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null; + final boolean store = memberenmd.last && parentemd.storeExpr != null; + + if (parentemd.current == null) { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected field [" + value + "] load."); + } + + if (parentemd.current.sort == Sort.ARRAY) { + if ("length".equals(value)) { + if (!parentemd.read) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Must read array field [length]."); + } else if (store) { + throw new IllegalArgumentException( + AnalyzerUtility.error(ctx) + "Cannot write to read-only array field [length]."); + } + + memberenmd.target = "#length"; + memberenmd.type = definition.intType; + parentemd.current = definition.intType; + } else { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unexpected array field [" + value + "]."); + } + } else if (parentemd.current.sort == Sort.DEF) { + memberenmd.target = value; + memberenmd.type = definition.defType; + analyzeLoadStoreExternal(ctx); + parentemd.current = memberenmd.type; + } else { + final Struct struct = parentemd.current.struct; + final Field field = parentemd.statik ? struct.statics.get(value) : struct.members.get(value); + + if (field != null) { + if (store && java.lang.reflect.Modifier.isFinal(field.reflect.getModifiers())) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Cannot write to read-only" + + " field [" + value + "] for type [" + struct.name + "]."); + } + + memberenmd.target = field; + memberenmd.type = field.type; + analyzeLoadStoreExternal(ctx); + parentemd.current = memberenmd.type; + } else { + final boolean get = parentemd.read || parentemd.token > 0 || !memberenmd.last; + final boolean set = memberenmd.last && store; + + Method getter = struct.methods.get("get" + Character.toUpperCase(value.charAt(0)) + value.substring(1)); + Method setter = struct.methods.get("set" + Character.toUpperCase(value.charAt(0)) + value.substring(1)); + Object constant = null; + + if (getter != null && (getter.rtn.sort == Sort.VOID || !getter.arguments.isEmpty())) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + + "Illegal get shortcut on field [" + value + "] for type [" + struct.name + "]."); + } + + if (setter != null && (setter.rtn.sort != Sort.VOID || setter.arguments.size() != 1)) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + + "Illegal set shortcut on field [" + value + "] for type [" + struct.name + "]."); + } + + Type settype = setter == null ? 
null : setter.arguments.get(0); + + if (getter == null && setter == null) { + if (ctx.EXTID() != null) { + try { + parentemd.current.clazz.asSubclass(Map.class); + + getter = parentemd.current.struct.methods.get("get"); + setter = parentemd.current.struct.methods.get("put"); + + if (getter != null && (getter.rtn.sort == Sort.VOID || getter.arguments.size() != 1 || + getter.arguments.get(0).sort != Sort.STRING)) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + + "Illegal map get shortcut [" + value + "] for type [" + struct.name + "]."); + } + + if (setter != null && (setter.arguments.size() != 2 || + setter.arguments.get(0).sort != Sort.STRING)) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + + "Illegal map set shortcut [" + value + "] for type [" + struct.name + "]."); + } + + if (getter != null && setter != null && !getter.rtn.equals(setter.arguments.get(1))) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Shortcut argument types must match."); + } + + settype = setter == null ? null : setter.arguments.get(1); + constant = value; + } catch (ClassCastException exception) { + //Do nothing. + } + } else if (ctx.EXTINTEGER() != null) { + try { + parentemd.current.clazz.asSubclass(List.class); + + getter = parentemd.current.struct.methods.get("get"); + setter = parentemd.current.struct.methods.get("set"); + + if (getter != null && (getter.rtn.sort == Sort.VOID || getter.arguments.size() != 1 || + getter.arguments.get(0).sort != Sort.INT)) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + + "Illegal list get shortcut [" + value + "] for type [" + struct.name + "]."); + } + + if (setter != null && (setter.rtn.sort != Sort.VOID || setter.arguments.size() != 2 || + setter.arguments.get(0).sort != Sort.INT)) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + + "Illegal list set shortcut [" + value + "] for type [" + struct.name + "]."); + } + + if (getter != null && setter != null && !getter.rtn.equals(setter.arguments.get(1))) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Shortcut argument types must match."); + } + + settype = setter == null ? null : setter.arguments.get(1); + + try { + constant = Integer.parseInt(value); + } catch (NumberFormatException exception) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + + "Illegal list shortcut value [" + value + "]."); + } + } catch (ClassCastException exception) { + //Do nothing. + } + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } + + if ((get || set) && (!get || getter != null) && (!set || setter != null)) { + memberenmd.target = new Object[] {getter, setter, constant != null, constant}; + memberenmd.type = get ? getter.rtn : settype; + analyzeLoadStoreExternal(ctx); + parentemd.current = get ? 
getter.rtn : setter.rtn; + } + } + + if (memberenmd.target == null) { + throw new IllegalArgumentException( + AnalyzerUtility.error(ctx) + "Unknown field [" + value + "] for type [" + struct.name + "]."); + } + } + + parentemd.statik = false; + + if (dotctx != null) { + metadata.createExtNodeMetadata(parent, dotctx); + analyzer.visit(dotctx); + } else if (bracectx != null) { + metadata.createExtNodeMetadata(parent, bracectx); + analyzer.visit(bracectx); + } + } + + void processExtnew(final ExtnewContext ctx) { + final ExtNodeMetadata newenmd = metadata.getExtNodeMetadata(ctx); + final ParserRuleContext parent = newenmd.parent; + final ExternalMetadata parentemd = metadata.getExternalMetadata(parent); + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + newenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null; + + final String name = ctx.TYPE().getText(); + final Struct struct = definition.structs.get(name); + + if (parentemd.current != null) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unexpected new call."); + } else if (struct == null) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Specified type [" + name + "] not found."); + } else if (newenmd.last && parentemd.storeExpr != null) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Cannot assign a value to a new call."); + } + + final boolean newclass = ctx.arguments() != null; + final boolean newarray = !ctx.expression().isEmpty(); + + final List<ExpressionContext> arguments = newclass ? ctx.arguments().expression() : ctx.expression(); + final int size = arguments.size(); + + Type[] types; + + if (newarray) { + if (!parentemd.read) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "A newly created array must be assigned."); + } + + types = new Type[size]; + Arrays.fill(types, definition.intType); + + newenmd.target = "#makearray"; + + if (size > 1) { + newenmd.type = definition.getType(struct, size); + parentemd.current = newenmd.type; + } else if (size == 1) { + newenmd.type = definition.getType(struct, 0); + parentemd.current = definition.getType(struct, 1); + } else { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "A newly created array cannot have zero dimensions."); + } + } else if (newclass) { + final Constructor constructor = struct.constructors.get("new"); + + if (constructor != null) { + types = new Type[constructor.arguments.size()]; + constructor.arguments.toArray(types); + + newenmd.target = constructor; + newenmd.type = definition.getType(struct, 0); + parentemd.statement = !parentemd.read && newenmd.last; + parentemd.current = newenmd.type; + } else { + throw new IllegalArgumentException( + AnalyzerUtility.error(ctx) + "Unknown new call on type [" + struct.name + "]."); + } + } else { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unknown state."); + } + + if (size != types.length) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "When calling [" + name + "] on type " + + "[" + struct.name + "] expected [" + types.length + "] arguments," + + " but found [" + arguments.size() + "]."); + } + + for (int argument = 0; argument < size; ++argument) { + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(arguments.get(argument)); + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + expremd.to = types[argument]; + analyzer.visit(exprctx); + caster.markCast(expremd); + } + + if (dotctx != null) { + 
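// A trailing dot segment chains further member access off the newly constructed object. +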
metadata.createExtNodeMetadata(parent, dotctx); + analyzer.visit(dotctx); + } else if (bracectx != null) { + metadata.createExtNodeMetadata(parent, bracectx); + analyzer.visit(bracectx); + } + } + + void processExtstring(final ExtstringContext ctx) { + final ExtNodeMetadata memberenmd = metadata.getExtNodeMetadata(ctx); + final ParserRuleContext parent = memberenmd.parent; + final ExternalMetadata parentemd = metadata.getExternalMetadata(parent); + + final String string = ctx.STRING().getText(); + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + memberenmd.last = parentemd.scope == 0 && dotctx == null && bracectx == null; + final boolean store = memberenmd.last && parentemd.storeExpr != null; + + if (parentemd.current != null) { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected String constant [" + string + "]."); + } + + if (!parentemd.read) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Must read String constant [" + string + "]."); + } else if (store) { + throw new IllegalArgumentException( + AnalyzerUtility.error(ctx) + "Cannot write to read-only String constant [" + string + "]."); + } + + memberenmd.target = string; + memberenmd.type = definition.stringType; + parentemd.current = definition.stringType; + + if (memberenmd.last) { + parentemd.constant = string; + } + + if (dotctx != null) { + metadata.createExtNodeMetadata(parent, dotctx); + analyzer.visit(dotctx); + } else if (bracectx != null) { + metadata.createExtNodeMetadata(parent, bracectx); + analyzer.visit(bracectx); + } + } + + private void analyzeLoadStoreExternal(final ParserRuleContext source) { + final ExtNodeMetadata extenmd = metadata.getExtNodeMetadata(source); + final ParserRuleContext parent = extenmd.parent; + final ExternalMetadata parentemd = metadata.getExternalMetadata(parent); + + if (extenmd.last && parentemd.storeExpr != null) { + final ParserRuleContext store = parentemd.storeExpr; + final ExpressionMetadata storeemd = metadata.createExpressionMetadata(parentemd.storeExpr); + final int token = parentemd.token; + + if (token > 0) { + analyzer.visit(store); + + final boolean add = token == ADD; + final boolean xor = token == BWAND || token == BWXOR || token == BWOR; + final boolean decimal = token == MUL || token == DIV || token == REM || token == SUB; + + extenmd.promote = add ? promoter.promoteAdd(extenmd.type, storeemd.from) : + xor ? promoter.promoteXor(extenmd.type, storeemd.from) : + promoter.promoteNumeric(extenmd.type, storeemd.from, decimal, true); + + if (extenmd.promote == null) { + throw new IllegalArgumentException("Cannot apply compound assignment to " + + "types [" + extenmd.type.name + "] and [" + storeemd.from.name + "]."); + } + + extenmd.castFrom = caster.getLegalCast(source, extenmd.type, extenmd.promote, false); + extenmd.castTo = caster.getLegalCast(source, extenmd.promote, extenmd.type, true); + + storeemd.to = add && extenmd.promote.sort == Sort.STRING ? 
storeemd.from : extenmd.promote; + caster.markCast(storeemd); + } else { + storeemd.to = extenmd.type; + analyzer.visit(store); + caster.markCast(storeemd); + } + } + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerPromoter.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerPromoter.java new file mode 100644 index 00000000000..ff77fb06d93 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerPromoter.java @@ -0,0 +1,281 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import org.elasticsearch.painless.Definition.Pair; +import org.elasticsearch.painless.Definition.Sort; +import org.elasticsearch.painless.Definition.Type; + +class AnalyzerPromoter { + private final Definition definition; + + AnalyzerPromoter(final Definition definition) { + this.definition = definition; + } + + Type promoteNumeric(final Type from, final boolean decimal, final boolean primitive) { + final Sort sort = from.sort; + + if (sort == Sort.DEF) { + return definition.defType; + } else if ((sort == Sort.DOUBLE || sort == Sort.DOUBLE_OBJ || sort == Sort.NUMBER) && decimal) { + return primitive ? definition.doubleType : definition.doubleobjType; + } else if ((sort == Sort.FLOAT || sort == Sort.FLOAT_OBJ) && decimal) { + return primitive ? definition.floatType : definition.floatobjType; + } else if (sort == Sort.LONG || sort == Sort.LONG_OBJ || sort == Sort.NUMBER) { + return primitive ? definition.longType : definition.longobjType; + } else if (sort.numeric) { + return primitive ? definition.intType : definition.intobjType; + } + + return null; + } + + Type promoteNumeric(final Type from0, final Type from1, final boolean decimal, final boolean primitive) { + final Sort sort0 = from0.sort; + final Sort sort1 = from1.sort; + + if (sort0 == Sort.DEF || sort1 == Sort.DEF) { + return definition.defType; + } + + if (decimal) { + if (sort0 == Sort.DOUBLE || sort0 == Sort.DOUBLE_OBJ || sort0 == Sort.NUMBER || + sort1 == Sort.DOUBLE || sort1 == Sort.DOUBLE_OBJ || sort1 == Sort.NUMBER) { + return primitive ? definition.doubleType : definition.doubleobjType; + } else if (sort0 == Sort.FLOAT || sort0 == Sort.FLOAT_OBJ || sort1 == Sort.FLOAT || sort1 == Sort.FLOAT_OBJ) { + return primitive ? definition.floatType : definition.floatobjType; + } + } + + if (sort0 == Sort.LONG || sort0 == Sort.LONG_OBJ || sort0 == Sort.NUMBER || + sort1 == Sort.LONG || sort1 == Sort.LONG_OBJ || sort1 == Sort.NUMBER) { + return primitive ? definition.longType : definition.longobjType; + } else if (sort0.numeric && sort1.numeric) { + return primitive ? 
definition.intType : definition.intobjType; + } + + return null; + } + + Type promoteAdd(final Type from0, final Type from1) { + final Sort sort0 = from0.sort; + final Sort sort1 = from1.sort; + + if (sort0 == Sort.STRING || sort1 == Sort.STRING) { + return definition.stringType; + } + + return promoteNumeric(from0, from1, true, true); + } + + Type promoteXor(final Type from0, final Type from1) { + final Sort sort0 = from0.sort; + final Sort sort1 = from1.sort; + + if (sort0.bool || sort1.bool) { + return definition.booleanType; + } + + return promoteNumeric(from0, from1, false, true); + } + + Type promoteEquality(final Type from0, final Type from1) { + final Sort sort0 = from0.sort; + final Sort sort1 = from1.sort; + + if (sort0 == Sort.DEF || sort1 == Sort.DEF) { + return definition.defType; + } + + final boolean primitive = sort0.primitive && sort1.primitive; + + if (sort0.bool && sort1.bool) { + return primitive ? definition.booleanType : definition.booleanobjType; + } + + if (sort0.numeric && sort1.numeric) { + return promoteNumeric(from0, from1, true, primitive); + } + + return definition.objectType; + } + + Type promoteReference(final Type from0, final Type from1) { + final Sort sort0 = from0.sort; + final Sort sort1 = from1.sort; + + if (sort0 == Sort.DEF || sort1 == Sort.DEF) { + return definition.defType; + } + + if (sort0.primitive && sort1.primitive) { + if (sort0.bool && sort1.bool) { + return definition.booleanType; + } + + if (sort0.numeric && sort1.numeric) { + return promoteNumeric(from0, from1, true, true); + } + } + + return definition.objectType; + } + + Type promoteConditional(final Type from0, final Type from1, final Object const0, final Object const1) { + if (from0.equals(from1)) { + return from0; + } + + final Sort sort0 = from0.sort; + final Sort sort1 = from1.sort; + + if (sort0 == Sort.DEF || sort1 == Sort.DEF) { + return definition.defType; + } + + final boolean primitive = sort0.primitive && sort1.primitive; + + if (sort0.bool && sort1.bool) { + return primitive ? definition.booleanType : definition.booleanobjType; + } + + if (sort0.numeric && sort1.numeric) { + if (sort0 == Sort.DOUBLE || sort0 == Sort.DOUBLE_OBJ || sort1 == Sort.DOUBLE || sort1 == Sort.DOUBLE_OBJ) { + return primitive ? definition.doubleType : definition.doubleobjType; + } else if (sort0 == Sort.FLOAT || sort0 == Sort.FLOAT_OBJ || sort1 == Sort.FLOAT || sort1 == Sort.FLOAT_OBJ) { + return primitive ? definition.floatType : definition.floatobjType; + } else if (sort0 == Sort.LONG || sort0 == Sort.LONG_OBJ || sort1 == Sort.LONG || sort1 == Sort.LONG_OBJ) { + return sort0.primitive && sort1.primitive ? definition.longType : definition.longobjType; + } else { + if (sort0 == Sort.BYTE || sort0 == Sort.BYTE_OBJ) { + if (sort1 == Sort.BYTE || sort1 == Sort.BYTE_OBJ) { + return primitive ? definition.byteType : definition.byteobjType; + } else if (sort1 == Sort.SHORT || sort1 == Sort.SHORT_OBJ) { + if (const1 != null) { + final short constant = (short)const1; + + if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { + return primitive ? definition.byteType : definition.byteobjType; + } + } + + return primitive ? definition.shortType : definition.shortobjType; + } else if (sort1 == Sort.CHAR || sort1 == Sort.CHAR_OBJ) { + return primitive ? definition.intType : definition.intobjType; + } else if (sort1 == Sort.INT || sort1 == Sort.INT_OBJ) { + if (const1 != null) { + final int constant = (int)const1; + + if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { + return primitive ? 
definition.byteType : definition.byteobjType; + } + } + + return primitive ? definition.intType : definition.intobjType; + } + } else if (sort0 == Sort.SHORT || sort0 == Sort.SHORT_OBJ) { + if (sort1 == Sort.BYTE || sort1 == Sort.BYTE_OBJ) { + if (const0 != null) { + final short constant = (short)const0; + + if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { + return primitive ? definition.byteType : definition.byteobjType; + } + } + + return primitive ? definition.shortType : definition.shortobjType; + } else if (sort1 == Sort.SHORT || sort1 == Sort.SHORT_OBJ) { + return primitive ? definition.shortType : definition.shortobjType; + } else if (sort1 == Sort.CHAR || sort1 == Sort.CHAR_OBJ) { + return primitive ? definition.intType : definition.intobjType; + } else if (sort1 == Sort.INT || sort1 == Sort.INT_OBJ) { + if (const1 != null) { + final int constant = (int)const1; + + if (constant <= Short.MAX_VALUE && constant >= Short.MIN_VALUE) { + return primitive ? definition.shortType : definition.shortobjType; + } + } + + return primitive ? definition.intType : definition.intobjType; + } + } else if (sort0 == Sort.CHAR || sort0 == Sort.CHAR_OBJ) { + if (sort1 == Sort.BYTE || sort1 == Sort.BYTE_OBJ) { + return primitive ? definition.intType : definition.intobjType; + } else if (sort1 == Sort.SHORT || sort1 == Sort.SHORT_OBJ) { + return primitive ? definition.intType : definition.intobjType; + } else if (sort1 == Sort.CHAR || sort1 == Sort.CHAR_OBJ) { + return primitive ? definition.charType : definition.charobjType; + } else if (sort1 == Sort.INT || sort1 == Sort.INT_OBJ) { + if (const1 != null) { + final int constant = (int)const1; + + if (constant <= Character.MAX_VALUE && constant >= Character.MIN_VALUE) { + return primitive ? definition.charType : definition.charobjType; + } + } + + return primitive ? definition.intType : definition.intobjType; + } + } else if (sort0 == Sort.INT || sort0 == Sort.INT_OBJ) { + if (sort1 == Sort.BYTE || sort1 == Sort.BYTE_OBJ) { + if (const0 != null) { + final int constant = (int)const0; + + if (constant <= Byte.MAX_VALUE && constant >= Byte.MIN_VALUE) { + return primitive ? definition.byteType : definition.byteobjType; + } + } + + return primitive ? definition.intType : definition.intobjType; + } else if (sort1 == Sort.SHORT || sort1 == Sort.SHORT_OBJ) { + if (const0 != null) { + final int constant = (int)const0; + + if (constant <= Short.MAX_VALUE && constant >= Short.MIN_VALUE) { + return primitive ? definition.shortType : definition.shortobjType; + } + } + + return primitive ? definition.intType : definition.intobjType; + } else if (sort1 == Sort.CHAR || sort1 == Sort.CHAR_OBJ) { + if (const0 != null) { + final int constant = (int)const0; + + if (constant <= Character.MAX_VALUE && constant >= Character.MIN_VALUE) { + return primitive ? definition.charType : definition.charobjType; + } + } + + return primitive ? definition.intType : definition.intobjType; + } else if (sort1 == Sort.INT || sort1 == Sort.INT_OBJ) { + return primitive ? definition.intType : definition.intobjType; + } + } + } + + final Pair pair = new Pair(from0, from1); + final Type bound = definition.bounds.get(pair); + + return bound == null ? 
definition.objectType : bound; + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerStatement.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerStatement.java new file mode 100644 index 00000000000..e44336035e6 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerStatement.java @@ -0,0 +1,581 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import org.elasticsearch.painless.Definition.Sort; +import org.elasticsearch.painless.Metadata.ExpressionMetadata; +import org.elasticsearch.painless.Metadata.StatementMetadata; +import org.elasticsearch.painless.PainlessParser.AfterthoughtContext; +import org.elasticsearch.painless.PainlessParser.BlockContext; +import org.elasticsearch.painless.PainlessParser.BreakContext; +import org.elasticsearch.painless.PainlessParser.ContinueContext; +import org.elasticsearch.painless.PainlessParser.DeclContext; +import org.elasticsearch.painless.PainlessParser.DeclarationContext; +import org.elasticsearch.painless.PainlessParser.DecltypeContext; +import org.elasticsearch.painless.PainlessParser.DeclvarContext; +import org.elasticsearch.painless.PainlessParser.DoContext; +import org.elasticsearch.painless.PainlessParser.ExprContext; +import org.elasticsearch.painless.PainlessParser.ExpressionContext; +import org.elasticsearch.painless.PainlessParser.ForContext; +import org.elasticsearch.painless.PainlessParser.IfContext; +import org.elasticsearch.painless.PainlessParser.InitializerContext; +import org.elasticsearch.painless.PainlessParser.MultipleContext; +import org.elasticsearch.painless.PainlessParser.ReturnContext; +import org.elasticsearch.painless.PainlessParser.SingleContext; +import org.elasticsearch.painless.PainlessParser.SourceContext; +import org.elasticsearch.painless.PainlessParser.StatementContext; +import org.elasticsearch.painless.PainlessParser.ThrowContext; +import org.elasticsearch.painless.PainlessParser.TrapContext; +import org.elasticsearch.painless.PainlessParser.TryContext; +import org.elasticsearch.painless.PainlessParser.WhileContext; + +import java.util.List; + +class AnalyzerStatement { + private final Metadata metadata; + private final Definition definition; + + private final Analyzer analyzer; + private final AnalyzerUtility utility; + private final AnalyzerCaster caster; + + AnalyzerStatement(final Metadata metadata, final Analyzer analyzer, + final AnalyzerUtility utility, final AnalyzerCaster caster) { + this.metadata = metadata; + this.definition = metadata.definition; + + this.analyzer = analyzer; + this.utility = utility; + this.caster = caster; + } + + void processSource(final SourceContext ctx) { + final StatementMetadata sourcesmd = 
metadata.getStatementMetadata(ctx); + final List<StatementContext> statectxs = ctx.statement(); + final StatementContext lastctx = statectxs.get(statectxs.size() - 1); + + utility.incrementScope(); + + for (final StatementContext statectx : statectxs) { + if (sourcesmd.allLast) { + throw new IllegalArgumentException(AnalyzerUtility.error(statectx) + + "Statement will never be executed because all prior paths escape."); + } + + final StatementMetadata statesmd = metadata.createStatementMetadata(statectx); + statesmd.lastSource = statectx == lastctx; + analyzer.visit(statectx); + + sourcesmd.methodEscape = statesmd.methodEscape; + sourcesmd.allLast = statesmd.allLast; + } + + utility.decrementScope(); + } + + void processIf(final IfContext ctx) { + final StatementMetadata ifsmd = metadata.getStatementMetadata(ctx); + + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression()); + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + expremd.to = definition.booleanType; + analyzer.visit(exprctx); + caster.markCast(expremd); + + if (expremd.postConst != null) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "If statement is not necessary."); + } + + final BlockContext blockctx0 = ctx.block(0); + final StatementMetadata blocksmd0 = metadata.createStatementMetadata(blockctx0); + blocksmd0.lastSource = ifsmd.lastSource; + blocksmd0.inLoop = ifsmd.inLoop; + blocksmd0.lastLoop = ifsmd.lastLoop; + utility.incrementScope(); + analyzer.visit(blockctx0); + utility.decrementScope(); + + ifsmd.anyContinue = blocksmd0.anyContinue; + ifsmd.anyBreak = blocksmd0.anyBreak; + + ifsmd.count = blocksmd0.count; + + if (ctx.ELSE() != null) { + final BlockContext blockctx1 = ctx.block(1); + final StatementMetadata blocksmd1 = metadata.createStatementMetadata(blockctx1); + blocksmd1.lastSource = ifsmd.lastSource; + utility.incrementScope(); + analyzer.visit(blockctx1); + utility.decrementScope(); + + ifsmd.methodEscape = blocksmd0.methodEscape && blocksmd1.methodEscape; + ifsmd.loopEscape = blocksmd0.loopEscape && blocksmd1.loopEscape; + ifsmd.allLast = blocksmd0.allLast && blocksmd1.allLast; + ifsmd.anyContinue |= blocksmd1.anyContinue; + ifsmd.anyBreak |= blocksmd1.anyBreak; + + ifsmd.count = Math.max(ifsmd.count, blocksmd1.count); + } + } + + void processWhile(final WhileContext ctx) { + final StatementMetadata whilesmd = metadata.getStatementMetadata(ctx); + + utility.incrementScope(); + + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression()); + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + expremd.to = definition.booleanType; + analyzer.visit(exprctx); + caster.markCast(expremd); + + boolean continuous = false; + + if (expremd.postConst != null) { + continuous = (boolean)expremd.postConst; + + if (!continuous) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "The loop will never be executed."); + } + + if (ctx.empty() != null) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "The loop will never exit."); + } + } + + final BlockContext blockctx = ctx.block(); + + if (blockctx != null) { + final StatementMetadata blocksmd = metadata.createStatementMetadata(blockctx); + blocksmd.beginLoop = true; + blocksmd.inLoop = true; + analyzer.visit(blockctx); + + if (blocksmd.loopEscape && !blocksmd.anyContinue) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "All paths escape so the loop is not necessary."); + } + + if (continuous && 
!blocksmd.anyBreak) { + whilesmd.methodEscape = true; + whilesmd.allLast = true; + } + } + + whilesmd.count = 1; + + utility.decrementScope(); + } + + void processDo(final DoContext ctx) { + final StatementMetadata dosmd = metadata.getStatementMetadata(ctx); + + utility.incrementScope(); + + final BlockContext blockctx = ctx.block(); + final StatementMetadata blocksmd = metadata.createStatementMetadata(blockctx); + blocksmd.beginLoop = true; + blocksmd.inLoop = true; + analyzer.visit(blockctx); + + if (blocksmd.loopEscape && !blocksmd.anyContinue) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "All paths escape so the loop is not necessary."); + } + + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression()); + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + expremd.to = definition.booleanType; + analyzer.visit(exprctx); + caster.markCast(expremd); + + if (expremd.postConst != null) { + final boolean continuous = (boolean)expremd.postConst; + + if (!continuous) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "All paths escape so the loop is not necessary."); + } + + if (!blocksmd.anyBreak) { + dosmd.methodEscape = true; + dosmd.allLast = true; + } + } + + dosmd.count = 1; + + utility.decrementScope(); + } + + void processFor(final ForContext ctx) { + final StatementMetadata forsmd = metadata.getStatementMetadata(ctx); + boolean continuous = false; + + utility.incrementScope(); + + final InitializerContext initctx = ctx.initializer(); + + if (initctx != null) { + metadata.createStatementMetadata(initctx); + analyzer.visit(initctx); + } + + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression()); + + if (exprctx != null) { + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + expremd.to = definition.booleanType; + analyzer.visit(exprctx); + caster.markCast(expremd); + + if (expremd.postConst != null) { + continuous = (boolean)expremd.postConst; + + if (!continuous) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "The loop will never be executed."); + } + + if (ctx.empty() != null) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "The loop is continuous."); + } + } + } else { + continuous = true; + } + + final AfterthoughtContext atctx = ctx.afterthought(); + + if (atctx != null) { + metadata.createStatementMetadata(atctx); + analyzer.visit(atctx); + } + + final BlockContext blockctx = ctx.block(); + + if (blockctx != null) { + final StatementMetadata blocksmd = metadata.createStatementMetadata(blockctx); + blocksmd.beginLoop = true; + blocksmd.inLoop = true; + analyzer.visit(blockctx); + + if (blocksmd.loopEscape && !blocksmd.anyContinue) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "All paths escape so the loop is not necessary."); + } + + if (continuous && !blocksmd.anyBreak) { + forsmd.methodEscape = true; + forsmd.allLast = true; + } + } + + forsmd.count = 1; + + utility.decrementScope(); + } + + void processDecl(final DeclContext ctx) { + final StatementMetadata declsmd = metadata.getStatementMetadata(ctx); + + final DeclarationContext declctx = ctx.declaration(); + metadata.createStatementMetadata(declctx); + analyzer.visit(declctx); + + declsmd.count = 1; + } + + void processContinue(final ContinueContext ctx) { + final StatementMetadata continuesmd = metadata.getStatementMetadata(ctx); + + if (!continuesmd.inLoop) { + throw new 
IllegalArgumentException(AnalyzerUtility.error(ctx) + "Cannot have a continue statement outside of a loop."); + } + + if (continuesmd.lastLoop) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Unnecessary continue statement at the end of a loop."); + } + + continuesmd.allLast = true; + continuesmd.anyContinue = true; + + continuesmd.count = 1; + } + + void processBreak(final BreakContext ctx) { + final StatementMetadata breaksmd = metadata.getStatementMetadata(ctx); + + if (!breaksmd.inLoop) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Cannot have a break statement outside of a loop."); + } + + breaksmd.loopEscape = true; + breaksmd.allLast = true; + breaksmd.anyBreak = true; + + breaksmd.count = 1; + } + + void processReturn(final ReturnContext ctx) { + final StatementMetadata returnsmd = metadata.getStatementMetadata(ctx); + + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression()); + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + expremd.to = definition.objectType; + analyzer.visit(exprctx); + caster.markCast(expremd); + + returnsmd.methodEscape = true; + returnsmd.loopEscape = true; + returnsmd.allLast = true; + + returnsmd.count = 1; + } + + void processTry(final TryContext ctx) { + final StatementMetadata trysmd = metadata.getStatementMetadata(ctx); + + final BlockContext blockctx = ctx.block(); + final StatementMetadata blocksmd = metadata.createStatementMetadata(blockctx); + blocksmd.lastSource = trysmd.lastSource; + blocksmd.inLoop = trysmd.inLoop; + blocksmd.lastLoop = trysmd.lastLoop; + utility.incrementScope(); + analyzer.visit(blockctx); + utility.decrementScope(); + + trysmd.methodEscape = blocksmd.methodEscape; + trysmd.loopEscape = blocksmd.loopEscape; + trysmd.allLast = blocksmd.allLast; + trysmd.anyContinue = blocksmd.anyContinue; + trysmd.anyBreak = blocksmd.anyBreak; + + int trapcount = 0; + + for (final TrapContext trapctx : ctx.trap()) { + final StatementMetadata trapsmd = metadata.createStatementMetadata(trapctx); + trapsmd.lastSource = trysmd.lastSource; + trapsmd.inLoop = trysmd.inLoop; + trapsmd.lastLoop = trysmd.lastLoop; + utility.incrementScope(); + analyzer.visit(trapctx); + utility.decrementScope(); + + trysmd.methodEscape &= trapsmd.methodEscape; + trysmd.loopEscape &= trapsmd.loopEscape; + trysmd.allLast &= trapsmd.allLast; + trysmd.anyContinue |= trapsmd.anyContinue; + trysmd.anyBreak |= trapsmd.anyBreak; + + trapcount = Math.max(trapcount, trapsmd.count); + } + + trysmd.count = blocksmd.count + trapcount; + } + + void processThrow(final ThrowContext ctx) { + final StatementMetadata throwsmd = metadata.getStatementMetadata(ctx); + + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression()); + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + expremd.to = definition.exceptionType; + analyzer.visit(exprctx); + caster.markCast(expremd); + + throwsmd.methodEscape = true; + throwsmd.loopEscape = true; + throwsmd.allLast = true; + + throwsmd.count = 1; + } + + void processExpr(final ExprContext ctx) { + final StatementMetadata exprsmd = metadata.getStatementMetadata(ctx); + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression()); + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + expremd.read = exprsmd.lastSource; + analyzer.visit(exprctx); + + if (!expremd.statement && !exprsmd.lastSource) { + throw new 
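processTry above merges the try block with its catch ("trap") blocks pessimistically: the &= updates require every block to escape before the whole statement escapes, the |= updates let any block contribute a continue or break, and because at most one trap can actually run, the statement's cost adds only the worst trap to the block's count. A sketch of that count rule, as a hypothetical helper:

final class TryCountSketch {
    static int tryStatementCount(final int blockCount, final int... trapCounts) {
        int worstTrap = 0;
        for (final int trapCount : trapCounts) {
            worstTrap = Math.max(worstTrap, trapCount); // at most one catch block executes
        }
        return blockCount + worstTrap;
    }
}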
IllegalArgumentException(AnalyzerUtility.error(ctx) + "Not a statement."); + } + + final boolean rtn = exprsmd.lastSource && expremd.from.sort != Sort.VOID; + exprsmd.methodEscape = rtn; + exprsmd.loopEscape = rtn; + exprsmd.allLast = rtn; + expremd.to = rtn ? definition.objectType : expremd.from; + caster.markCast(expremd); + + exprsmd.count = 1; + } + + void processMultiple(final MultipleContext ctx) { + final StatementMetadata multiplesmd = metadata.getStatementMetadata(ctx); + final List statectxs = ctx.statement(); + final StatementContext lastctx = statectxs.get(statectxs.size() - 1); + + for (StatementContext statectx : statectxs) { + if (multiplesmd.allLast) { + throw new IllegalArgumentException(AnalyzerUtility.error(statectx) + + "Statement will never be executed because all prior paths escape."); + } + + final StatementMetadata statesmd = metadata.createStatementMetadata(statectx); + statesmd.lastSource = multiplesmd.lastSource && statectx == lastctx; + statesmd.inLoop = multiplesmd.inLoop; + statesmd.lastLoop = (multiplesmd.beginLoop || multiplesmd.lastLoop) && statectx == lastctx; + analyzer.visit(statectx); + + multiplesmd.methodEscape = statesmd.methodEscape; + multiplesmd.loopEscape = statesmd.loopEscape; + multiplesmd.allLast = statesmd.allLast; + multiplesmd.anyContinue |= statesmd.anyContinue; + multiplesmd.anyBreak |= statesmd.anyBreak; + + multiplesmd.count += statesmd.count; + } + } + + void processSingle(final SingleContext ctx) { + final StatementMetadata singlesmd = metadata.getStatementMetadata(ctx); + + final StatementContext statectx = ctx.statement(); + final StatementMetadata statesmd = metadata.createStatementMetadata(statectx); + statesmd.lastSource = singlesmd.lastSource; + statesmd.inLoop = singlesmd.inLoop; + statesmd.lastLoop = singlesmd.beginLoop || singlesmd.lastLoop; + analyzer.visit(statectx); + + singlesmd.methodEscape = statesmd.methodEscape; + singlesmd.loopEscape = statesmd.loopEscape; + singlesmd.allLast = statesmd.allLast; + singlesmd.anyContinue = statesmd.anyContinue; + singlesmd.anyBreak = statesmd.anyBreak; + + singlesmd.count = statesmd.count; + } + + void processInitializer(InitializerContext ctx) { + final DeclarationContext declctx = ctx.declaration(); + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression()); + + if (declctx != null) { + metadata.createStatementMetadata(declctx); + analyzer.visit(declctx); + } else if (exprctx != null) { + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + expremd.read = false; + analyzer.visit(exprctx); + + expremd.to = expremd.from; + caster.markCast(expremd); + + if (!expremd.statement) { + throw new IllegalArgumentException(AnalyzerUtility.error(exprctx) + + "The initializer of a for loop must be a statement."); + } + } else { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } + + void processAfterthought(AfterthoughtContext ctx) { + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression()); + + if (exprctx != null) { + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + expremd.read = false; + analyzer.visit(exprctx); + + expremd.to = expremd.from; + caster.markCast(expremd); + + if (!expremd.statement) { + throw new IllegalArgumentException(AnalyzerUtility.error(exprctx) + + "The afterthought of a for loop must be a statement."); + } + } + } + + void processDeclaration(final DeclarationContext ctx) { + final DecltypeContext 
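processMultiple above threads positional context down to each child: only the final statement of a block that is itself last in the method may act as the implicit return value (lastSource), and only the final statement of a loop body is lastLoop, which is what lets processContinue reject a trailing continue as unnecessary. The two predicates, isolated under hypothetical names:

final class LastStatementSketch {
    static boolean childLastSource(final boolean parentLastSource, final boolean isLastChild) {
        return parentLastSource && isLastChild;   // only a last block's final statement can be the implicit return
    }

    static boolean childLastLoop(final boolean parentBeginsLoop, final boolean parentLastLoop, final boolean isLastChild) {
        return (parentBeginsLoop || parentLastLoop) && isLastChild; // only a loop body's final statement is "last in loop"
    }
}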
decltypectx = ctx.decltype(); + final ExpressionMetadata decltypeemd = metadata.createExpressionMetadata(decltypectx); + analyzer.visit(decltypectx); + + for (final DeclvarContext declvarctx : ctx.declvar()) { + final ExpressionMetadata declvaremd = metadata.createExpressionMetadata(declvarctx); + declvaremd.to = decltypeemd.from; + analyzer.visit(declvarctx); + } + } + + void processDecltype(final DecltypeContext ctx) { + final ExpressionMetadata decltypeemd = metadata.getExpressionMetadata(ctx); + final String name = ctx.getText(); + decltypeemd.from = definition.getType(name); + } + + void processDeclvar(final DeclvarContext ctx) { + final ExpressionMetadata declvaremd = metadata.getExpressionMetadata(ctx); + + final String name = ctx.ID().getText(); + declvaremd.postConst = utility.addVariable(ctx, name, declvaremd.to).slot; + + final ExpressionContext exprctx = AnalyzerUtility.updateExpressionTree(ctx.expression()); + + if (exprctx != null) { + final ExpressionMetadata expremd = metadata.createExpressionMetadata(exprctx); + expremd.to = declvaremd.to; + analyzer.visit(exprctx); + caster.markCast(expremd); + } + } + + void processTrap(final TrapContext ctx) { + final StatementMetadata trapsmd = metadata.getStatementMetadata(ctx); + + final String type = ctx.TYPE().getText(); + trapsmd.exception = definition.getType(type); + + try { + trapsmd.exception.clazz.asSubclass(Exception.class); + } catch (final ClassCastException exception) { + throw new IllegalArgumentException(AnalyzerUtility.error(ctx) + "Invalid exception type [" + trapsmd.exception.name + "]."); + } + + final String id = ctx.ID().getText(); + trapsmd.slot = utility.addVariable(ctx, id, trapsmd.exception).slot; + + final BlockContext blockctx = ctx.block(); + + if (blockctx != null) { + final StatementMetadata blocksmd = metadata.createStatementMetadata(blockctx); + blocksmd.lastSource = trapsmd.lastSource; + blocksmd.inLoop = trapsmd.inLoop; + blocksmd.lastLoop = trapsmd.lastLoop; + analyzer.visit(blockctx); + + trapsmd.methodEscape = blocksmd.methodEscape; + trapsmd.loopEscape = blocksmd.loopEscape; + trapsmd.allLast = blocksmd.allLast; + trapsmd.anyContinue = blocksmd.anyContinue; + trapsmd.anyBreak = blocksmd.anyBreak; + } else if (ctx.emptyscope() == null) { + throw new IllegalStateException(AnalyzerUtility.error(ctx) + "Unexpected state."); + } + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerUtility.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerUtility.java new file mode 100644 index 00000000000..11fb669f190 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/AnalyzerUtility.java @@ -0,0 +1,144 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.tree.ParseTree; +import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.PainlessParser.ExpressionContext; +import org.elasticsearch.painless.PainlessParser.PrecedenceContext; + +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.Iterator; + +class AnalyzerUtility { + static class Variable { + final String name; + final Type type; + final int slot; + + private Variable(final String name, final Type type, final int slot) { + this.name = name; + this.type = type; + this.slot = slot; + } + } + + /** + * A utility method to output consistent error messages. + * @param ctx The ANTLR node the error occurred in. + * @return The error message with the line number and character position appended. + */ + static String error(final ParserRuleContext ctx) { + return "Analyzer Error [" + ctx.getStart().getLine() + ":" + ctx.getStart().getCharPositionInLine() + "]: "; + } + + /** + * The ANTLR parse tree is modified in only one case: a parent node needs to check a child node to see if it's + * a precedence node, and if so, it must be removed from the tree permanently. Once the ANTLR tree is built, + * precedence nodes are no longer necessary to maintain the correct ordering of the tree, so they only + * add a level of indirection where complicated decisions about metadata passing would have to be made. This + * method removes the need for those decisions. + * @param source The child ANTLR node to check for precedence. + * @return The updated child ANTLR node. + */ + static ExpressionContext updateExpressionTree(ExpressionContext source) { + // Check to see if the ANTLR node is a precedence node. + if (source instanceof PrecedenceContext) { + final ParserRuleContext parent = source.getParent(); + int index = 0; + + // Mark the index of the source node within the list of child nodes from the parent. + for (final ParseTree child : parent.children) { + if (child == source) { + break; + } + + ++index; + } + + // If there are multiple precedence nodes in a row, remove them all. + while (source instanceof PrecedenceContext) { + source = ((PrecedenceContext)source).expression(); + } + + // Update the parent node with the child of the precedence node.
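Viewed in isolation, the splice this method finishes just below unwraps any run of wrapper nodes and re-points the parent's child list at the innermost real expression. A standalone illustration with hypothetical Node and PrecedenceNode types (the real code operates on ANTLR contexts):

import java.util.List;

final class PrecedenceSpliceSketch {
    interface Node { }
    interface PrecedenceNode extends Node { Node inner(); }

    static Node unwrap(Node source, final List<Node> parentChildren, final int index) {
        while (source instanceof PrecedenceNode) {       // several wrappers may nest
            source = ((PrecedenceNode) source).inner();
        }
        parentChildren.set(index, source);               // re-point the parent at the real expression
        return source;
    }
}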
+ parent.children.set(index, source); + } + + return source; + } + + private final Deque scopes = new ArrayDeque<>(); + private final Deque variables = new ArrayDeque<>(); + + void incrementScope() { + scopes.push(0); + } + + void decrementScope() { + int remove = scopes.pop(); + + while (remove > 0) { + variables.pop(); + --remove; + } + } + + Variable getVariable(final String name) { + final Iterator itr = variables.iterator(); + + while (itr.hasNext()) { + final Variable variable = itr.next(); + + if (variable.name.equals(name)) { + return variable; + } + } + + return null; + } + + Variable addVariable(final ParserRuleContext source, final String name, final Type type) { + if (getVariable(name) != null) { + if (source == null) { + throw new IllegalArgumentException("Argument name [" + name + "] already defined within the scope."); + } else { + throw new IllegalArgumentException(error(source) + "Variable name [" + name + "] already defined within the scope."); + } + } + + final Variable previous = variables.peekFirst(); + int slot = 0; + + if (previous != null) { + slot += previous.slot + previous.type.type.getSize(); + } + + final Variable variable = new Variable(name, type, slot); + variables.push(variable); + + final int update = scopes.pop() + 1; + scopes.push(update); + + return variable; + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java index d1b0e2dc6fe..3d8123a4800 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java @@ -160,7 +160,7 @@ final class Compiler { // throw new RuntimeException(e); // } - final Class clazz = loader.define(Writer.CLASS_NAME, bytes); + final Class clazz = loader.define(WriterConstants.CLASS_NAME, bytes); final java.lang.reflect.Constructor constructor = clazz.getConstructor(Definition.class, String.class, String.class); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Metadata.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Metadata.java index cace48ff433..e38d6da7d98 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Metadata.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Metadata.java @@ -20,11 +20,8 @@ package org.elasticsearch.painless; import org.antlr.v4.runtime.ParserRuleContext; -import org.antlr.v4.runtime.tree.ParseTree; import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.Definition.Type; -import org.elasticsearch.painless.PainlessParser.ExpressionContext; -import org.elasticsearch.painless.PainlessParser.PrecedenceContext; import java.util.HashMap; import java.util.Map; @@ -37,7 +34,6 @@ import java.util.Map; * the root of the ANTLR parse tree, and the {@link CompilerSettings}. */ class Metadata { - /** * StatementMetadata is used to store metadata mostly about * control flow for ANTLR nodes related to if/else, do, while, for, etc. @@ -386,15 +382,6 @@ class Metadata { } } - /** - * A utility method to output consistent error messages. - * @param ctx The ANTLR node the error occurred in. - * @return The error message with tacked on line number and character position. 
- */ - static String error(final ParserRuleContext ctx) { - return "Error [" + ctx.getStart().getLine() + ":" + ctx.getStart().getCharPositionInLine() + "]: "; - } - /** * Acts as both the Painless API and white-list for what types and methods are allowed. */ @@ -490,49 +477,13 @@ class Metadata { final StatementMetadata sourcesmd = statementMetadata.get(source); if (sourcesmd == null) { - throw new IllegalStateException(error(source) + "Statement metadata does not exist at" + + throw new IllegalStateException("Statement metadata does not exist at" + " the parse node with text [" + source.getText() + "]."); } return sourcesmd; } - /** - * The ANTLR parse tree is modified in one single case; a parent node needs to check a child node to see if it's - * a precedence node, and if so, it must be removed from the tree permanently. Once the ANTLR tree is built, - * precedence nodes are no longer necessary to maintain the correct ordering of the tree, so they only - * add a level of indirection where complicated decisions about metadata passing would have to be made. This - * method removes the need for those decisions. - * @param source The child ANTLR node to check for precedence. - * @return The updated child ANTLR node. - */ - ExpressionContext updateExpressionTree(ExpressionContext source) { - // Check to see if the ANTLR node is a precedence node. - if (source instanceof PrecedenceContext) { - final ParserRuleContext parent = source.getParent(); - int index = 0; - - // Mark the index of the source node within the list of child nodes from the parent. - for (final ParseTree child : parent.children) { - if (child == source) { - break; - } - - ++index; - } - - // If there are multiple precedence nodes in a row, remove them all. - while (source instanceof PrecedenceContext) { - source = ((PrecedenceContext)source).expression(); - } - - // Update the parent node with the child of the precedence node. - parent.children.set(index, source); - } - - return source; - } - /** * Creates a new ExpressionMetadata and stores it in the expressionMetadata map. * @param source The ANTLR node for this metadata. 
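Back in AnalyzerUtility above, addVariable assigns JVM local-variable slots incrementally: each new variable starts where the previous one ends, and type.type.getSize() reports two slots for category-2 values (long, double) and one otherwise. The arithmetic, isolated as a sketch with hypothetical parameter names:

final class SlotSketch {
    static int nextSlot(final int previousSlot, final int previousTypeSize) {
        // An int at slot 3 puts the next variable at slot 4; a long at slot 3 puts it at slot 5.
        return previousSlot + previousTypeSize;
    }
}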
@@ -554,7 +505,7 @@ class Metadata { final ExpressionMetadata sourceemd = expressionMetadata.get(source); if (sourceemd == null) { - throw new IllegalStateException(error(source) + "Expression metadata does not exist at" + + throw new IllegalStateException("Expression metadata does not exist at" + " the parse node with text [" + source.getText() + "]."); } @@ -582,7 +533,7 @@ class Metadata { final ExternalMetadata sourceemd = externalMetadata.get(source); if (sourceemd == null) { - throw new IllegalStateException(error(source) + "External metadata does not exist at" + + throw new IllegalStateException("External metadata does not exist at" + " the parse node with text [" + source.getText() + "]."); } @@ -610,7 +561,7 @@ class Metadata { final ExtNodeMetadata sourceemd = extNodeMetadata.get(source); if (sourceemd == null) { - throw new IllegalStateException(error(source) + "External metadata does not exist at" + + throw new IllegalStateException("External metadata does not exist at" + " the parse node with text [" + source.getText() + "]."); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Writer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Writer.java index 431e724127f..4ddb260aea0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Writer.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Writer.java @@ -19,20 +19,11 @@ package org.elasticsearch.painless; -import org.antlr.v4.runtime.ParserRuleContext; import org.antlr.v4.runtime.tree.ParseTree; -import org.elasticsearch.painless.Definition.Cast; -import org.elasticsearch.painless.Definition.Constructor; -import org.elasticsearch.painless.Definition.Field; -import org.elasticsearch.painless.Definition.Method; -import org.elasticsearch.painless.Definition.Sort; -import org.elasticsearch.painless.Definition.Transform; -import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.PainlessParser.AfterthoughtContext; import org.elasticsearch.painless.PainlessParser.ArgumentsContext; import org.elasticsearch.painless.PainlessParser.AssignmentContext; import org.elasticsearch.painless.PainlessParser.BinaryContext; -import org.elasticsearch.painless.PainlessParser.BlockContext; import org.elasticsearch.painless.PainlessParser.BoolContext; import org.elasticsearch.painless.PainlessParser.BreakContext; import org.elasticsearch.painless.PainlessParser.CastContext; @@ -48,7 +39,6 @@ import org.elasticsearch.painless.PainlessParser.DoContext; import org.elasticsearch.painless.PainlessParser.EmptyContext; import org.elasticsearch.painless.PainlessParser.EmptyscopeContext; import org.elasticsearch.painless.PainlessParser.ExprContext; -import org.elasticsearch.painless.PainlessParser.ExpressionContext; import org.elasticsearch.painless.PainlessParser.ExtbraceContext; import org.elasticsearch.painless.PainlessParser.ExtcallContext; import org.elasticsearch.painless.PainlessParser.ExtcastContext; @@ -75,261 +65,82 @@ import org.elasticsearch.painless.PainlessParser.PreincContext; import org.elasticsearch.painless.PainlessParser.ReturnContext; import org.elasticsearch.painless.PainlessParser.SingleContext; import org.elasticsearch.painless.PainlessParser.SourceContext; -import org.elasticsearch.painless.PainlessParser.StatementContext; import org.elasticsearch.painless.PainlessParser.ThrowContext; import org.elasticsearch.painless.PainlessParser.TrapContext; import org.elasticsearch.painless.PainlessParser.TrueContext; import 
org.elasticsearch.painless.PainlessParser.TryContext; import org.elasticsearch.painless.PainlessParser.UnaryContext; import org.elasticsearch.painless.PainlessParser.WhileContext; -import org.elasticsearch.script.ScoreAccessor; import org.objectweb.asm.ClassWriter; import org.objectweb.asm.Label; import org.objectweb.asm.Opcodes; import org.objectweb.asm.commons.GeneratorAdapter; -import java.lang.invoke.MethodType; -import java.util.ArrayDeque; -import java.util.Deque; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.elasticsearch.painless.PainlessParser.ADD; -import static org.elasticsearch.painless.PainlessParser.BWAND; -import static org.elasticsearch.painless.PainlessParser.BWOR; -import static org.elasticsearch.painless.PainlessParser.BWXOR; -import static org.elasticsearch.painless.PainlessParser.DIV; -import static org.elasticsearch.painless.PainlessParser.LSH; -import static org.elasticsearch.painless.PainlessParser.MUL; -import static org.elasticsearch.painless.PainlessParser.REM; -import static org.elasticsearch.painless.PainlessParser.RSH; -import static org.elasticsearch.painless.PainlessParser.SUB; -import static org.elasticsearch.painless.PainlessParser.USH; +import static org.elasticsearch.painless.WriterConstants.BASE_CLASS_TYPE; +import static org.elasticsearch.painless.WriterConstants.CLASS_TYPE; +import static org.elasticsearch.painless.WriterConstants.CONSTRUCTOR; +import static org.elasticsearch.painless.WriterConstants.EXECUTE; +import static org.elasticsearch.painless.WriterConstants.MAP_GET; +import static org.elasticsearch.painless.WriterConstants.MAP_TYPE; +import static org.elasticsearch.painless.WriterConstants.SCORE_ACCESSOR_FLOAT; +import static org.elasticsearch.painless.WriterConstants.SCORE_ACCESSOR_TYPE; +import static org.elasticsearch.painless.WriterConstants.SIGNATURE; class Writer extends PainlessParserBaseVisitor { - private static class Branch { - final ParserRuleContext source; - - Label begin = null; - Label end = null; - Label tru = null; - Label fals = null; - - private Branch(final ParserRuleContext source) { - this.source = source; - } - } - - final static String BASE_CLASS_NAME = Executable.class.getName(); - final static String CLASS_NAME = BASE_CLASS_NAME + "$CompiledPainlessExecutable"; - private final static org.objectweb.asm.Type BASE_CLASS_TYPE = org.objectweb.asm.Type.getType(Executable.class); - private final static org.objectweb.asm.Type CLASS_TYPE = org.objectweb.asm.Type.getType("L" + CLASS_NAME.replace(".", "/") + ";"); - - private final static org.objectweb.asm.commons.Method CONSTRUCTOR = - getAsmMethod(void.class, "", Definition.class, String.class, String.class); - private final static org.objectweb.asm.commons.Method EXECUTE = getAsmMethod(Object.class, "execute", Map.class); - private final static String SIGNATURE = "(Ljava/util/Map;)Ljava/lang/Object;"; - - private final static org.objectweb.asm.Type PAINLESS_ERROR_TYPE = org.objectweb.asm.Type.getType(PainlessError.class); - - private final static org.objectweb.asm.Type DEFINITION_TYPE = org.objectweb.asm.Type.getType(Definition.class); - - private final static org.objectweb.asm.Type MAP_TYPE = org.objectweb.asm.Type.getType(Map.class); - private final static org.objectweb.asm.commons.Method MAP_GET = getAsmMethod(Object.class, "get", Object.class); - - private final static org.objectweb.asm.Type SCORE_ACCESSOR_TYPE = org.objectweb.asm.Type.getType(ScoreAccessor.class); - 
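The static imports above refer to the new WriterConstants holder, which presumably gathers the ASM type and method constants previously declared inline on Writer (the removals that follow). A sketch of the pattern with two representative constants, names assumed to mirror the originals:

final class WriterConstantsSketch {
    static final org.objectweb.asm.Type MAP_TYPE =
        org.objectweb.asm.Type.getType(java.util.Map.class);
    // java.util.Map#get(Object), expressed as an ASM method with its JVM descriptor.
    static final org.objectweb.asm.commons.Method MAP_GET =
        new org.objectweb.asm.commons.Method("get", "(Ljava/lang/Object;)Ljava/lang/Object;");

    private WriterConstantsSketch() {}
}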
private final static org.objectweb.asm.commons.Method SCORE_ACCESSOR_FLOAT = getAsmMethod(float.class, "floatValue"); - - private final static org.objectweb.asm.commons.Method DEF_METHOD_CALL = getAsmMethod( - Object.class, "methodCall", Object.class, String.class, Definition.class, Object[].class, boolean[].class); - private final static org.objectweb.asm.commons.Method DEF_ARRAY_STORE = getAsmMethod( - void.class, "arrayStore", Object.class, Object.class, Object.class, Definition.class, boolean.class, boolean.class); - private final static org.objectweb.asm.commons.Method DEF_ARRAY_LOAD = getAsmMethod( - Object.class, "arrayLoad", Object.class, Object.class, Definition.class, boolean.class); - private final static org.objectweb.asm.commons.Method DEF_FIELD_STORE = getAsmMethod( - void.class, "fieldStore", Object.class, Object.class, String.class, Definition.class, boolean.class); - private final static org.objectweb.asm.commons.Method DEF_FIELD_LOAD = getAsmMethod( - Object.class, "fieldLoad", Object.class, String.class, Definition.class); - - private final static org.objectweb.asm.commons.Method DEF_NOT_CALL = getAsmMethod(Object.class, "not", Object.class); - private final static org.objectweb.asm.commons.Method DEF_NEG_CALL = getAsmMethod(Object.class, "neg", Object.class); - private final static org.objectweb.asm.commons.Method DEF_MUL_CALL = getAsmMethod(Object.class, "mul", Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_DIV_CALL = getAsmMethod(Object.class, "div", Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_REM_CALL = getAsmMethod(Object.class, "rem", Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_ADD_CALL = getAsmMethod(Object.class, "add", Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_SUB_CALL = getAsmMethod(Object.class, "sub", Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_LSH_CALL = getAsmMethod(Object.class, "lsh", Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_RSH_CALL = getAsmMethod(Object.class, "rsh", Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_USH_CALL = getAsmMethod(Object.class, "ush", Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_AND_CALL = getAsmMethod(Object.class, "and", Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_XOR_CALL = getAsmMethod(Object.class, "xor", Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_OR_CALL = getAsmMethod(Object.class, "or" , Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_EQ_CALL = getAsmMethod(boolean.class, "eq" , Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_LT_CALL = getAsmMethod(boolean.class, "lt" , Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_LTE_CALL = getAsmMethod(boolean.class, "lte", Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_GT_CALL = getAsmMethod(boolean.class, "gt" , Object.class, Object.class); - private final static org.objectweb.asm.commons.Method DEF_GTE_CALL = getAsmMethod(boolean.class, "gte", Object.class, Object.class); - - private final static org.objectweb.asm.Type STRINGBUILDER_TYPE = 
org.objectweb.asm.Type.getType(StringBuilder.class); - - private final static org.objectweb.asm.commons.Method STRINGBUILDER_CONSTRUCTOR = getAsmMethod(void.class, ""); - private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_BOOLEAN = - getAsmMethod(StringBuilder.class, "append", boolean.class); - private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_CHAR = - getAsmMethod(StringBuilder.class, "append", char.class); - private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_INT = - getAsmMethod(StringBuilder.class, "append", int.class); - private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_LONG = - getAsmMethod(StringBuilder.class, "append", long.class); - private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_FLOAT = - getAsmMethod(StringBuilder.class, "append", float.class); - private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_DOUBLE = - getAsmMethod(StringBuilder.class, "append", double.class); - private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_STRING = - getAsmMethod(StringBuilder.class, "append", String.class); - private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_OBJECT = - getAsmMethod(StringBuilder.class, "append", Object.class); - private final static org.objectweb.asm.commons.Method STRINGBUILDER_TOSTRING = getAsmMethod(String.class, "toString"); - - private final static org.objectweb.asm.commons.Method TOINTEXACT_LONG = getAsmMethod(int.class, "toIntExact", long.class); - private final static org.objectweb.asm.commons.Method NEGATEEXACT_INT = getAsmMethod(int.class, "negateExact", int.class); - private final static org.objectweb.asm.commons.Method NEGATEEXACT_LONG = getAsmMethod(long.class, "negateExact", long.class); - private final static org.objectweb.asm.commons.Method MULEXACT_INT = getAsmMethod(int.class, "multiplyExact", int.class, int.class); - private final static org.objectweb.asm.commons.Method MULEXACT_LONG = getAsmMethod(long.class, "multiplyExact", long.class, long.class); - private final static org.objectweb.asm.commons.Method ADDEXACT_INT = getAsmMethod(int.class, "addExact", int.class, int.class); - private final static org.objectweb.asm.commons.Method ADDEXACT_LONG = getAsmMethod(long.class, "addExact", long.class, long.class); - private final static org.objectweb.asm.commons.Method SUBEXACT_INT = getAsmMethod(int.class, "subtractExact", int.class, int.class); - private final static org.objectweb.asm.commons.Method SUBEXACT_LONG = getAsmMethod(long.class, "subtractExact", long.class, long.class); - - private final static org.objectweb.asm.commons.Method CHECKEQUALS = - getAsmMethod(boolean.class, "checkEquals", Object.class, Object.class); - private final static org.objectweb.asm.commons.Method TOBYTEEXACT_INT = getAsmMethod(byte.class, "toByteExact", int.class); - private final static org.objectweb.asm.commons.Method TOBYTEEXACT_LONG = getAsmMethod(byte.class, "toByteExact", long.class); - private final static org.objectweb.asm.commons.Method TOBYTEWOOVERFLOW_FLOAT = - getAsmMethod(byte.class, "toByteWithoutOverflow", float.class); - private final static org.objectweb.asm.commons.Method TOBYTEWOOVERFLOW_DOUBLE = - getAsmMethod(byte.class, "toByteWithoutOverflow", double.class); - private final static org.objectweb.asm.commons.Method TOSHORTEXACT_INT = getAsmMethod(short.class, "toShortExact", int.class); - private final static org.objectweb.asm.commons.Method TOSHORTEXACT_LONG = 
getAsmMethod(short.class, "toShortExact", long.class); - private final static org.objectweb.asm.commons.Method TOSHORTWOOVERFLOW_FLOAT = - getAsmMethod(short.class, "toShortWithoutOverflow", float.class); - private final static org.objectweb.asm.commons.Method TOSHORTWOOVERFLOW_DOUBLE = - getAsmMethod(short.class, "toShortWihtoutOverflow", double.class); - private final static org.objectweb.asm.commons.Method TOCHAREXACT_INT = getAsmMethod(char.class, "toCharExact", int.class); - private final static org.objectweb.asm.commons.Method TOCHAREXACT_LONG = getAsmMethod(char.class, "toCharExact", long.class); - private final static org.objectweb.asm.commons.Method TOCHARWOOVERFLOW_FLOAT = - getAsmMethod(char.class, "toCharWithoutOverflow", float.class); - private final static org.objectweb.asm.commons.Method TOCHARWOOVERFLOW_DOUBLE = - getAsmMethod(char.class, "toCharWithoutOverflow", double.class); - private final static org.objectweb.asm.commons.Method TOINTWOOVERFLOW_FLOAT = - getAsmMethod(int.class, "toIntWithoutOverflow", float.class); - private final static org.objectweb.asm.commons.Method TOINTWOOVERFLOW_DOUBLE = - getAsmMethod(int.class, "toIntWithoutOverflow", double.class); - private final static org.objectweb.asm.commons.Method TOLONGWOOVERFLOW_FLOAT = - getAsmMethod(long.class, "toLongWithoutOverflow", float.class); - private final static org.objectweb.asm.commons.Method TOLONGWOOVERFLOW_DOUBLE = - getAsmMethod(long.class, "toLongWithoutOverflow", double.class); - private final static org.objectweb.asm.commons.Method TOFLOATWOOVERFLOW_DOUBLE = - getAsmMethod(float.class , "toFloatWihtoutOverflow", double.class); - private final static org.objectweb.asm.commons.Method MULWOOVERLOW_FLOAT = - getAsmMethod(float.class, "multiplyWithoutOverflow", float.class, float.class); - private final static org.objectweb.asm.commons.Method MULWOOVERLOW_DOUBLE = - getAsmMethod(double.class, "multiplyWithoutOverflow", double.class, double.class); - private final static org.objectweb.asm.commons.Method DIVWOOVERLOW_INT = - getAsmMethod(int.class, "divideWithoutOverflow", int.class, int.class); - private final static org.objectweb.asm.commons.Method DIVWOOVERLOW_LONG = - getAsmMethod(long.class, "divideWithoutOverflow", long.class, long.class); - private final static org.objectweb.asm.commons.Method DIVWOOVERLOW_FLOAT = - getAsmMethod(float.class, "divideWithoutOverflow", float.class, float.class); - private final static org.objectweb.asm.commons.Method DIVWOOVERLOW_DOUBLE = - getAsmMethod(double.class, "divideWithoutOverflow", double.class, double.class); - private final static org.objectweb.asm.commons.Method REMWOOVERLOW_FLOAT = - getAsmMethod(float.class, "remainderWithoutOverflow", float.class, float.class); - private final static org.objectweb.asm.commons.Method REMWOOVERLOW_DOUBLE = - getAsmMethod(double.class, "remainderWithoutOverflow", double.class, double.class); - private final static org.objectweb.asm.commons.Method ADDWOOVERLOW_FLOAT = - getAsmMethod(float.class, "addWithoutOverflow", float.class, float.class); - private final static org.objectweb.asm.commons.Method ADDWOOVERLOW_DOUBLE = - getAsmMethod(double.class, "addWithoutOverflow", double.class, double.class); - private final static org.objectweb.asm.commons.Method SUBWOOVERLOW_FLOAT = - getAsmMethod(float.class, "subtractWithoutOverflow", float.class, float.class); - private final static org.objectweb.asm.commons.Method SUBWOOVERLOW_DOUBLE = - getAsmMethod(double.class, "subtractWithoutOverflow", double.class, double.class); - - 
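The *EXACT constants being removed here bind java.lang.Math's exact-arithmetic helpers; when the compiler's numeric-overflow setting is disabled, the writer emits these calls in place of raw JVM arithmetic so that overflow throws instead of wrapping. For example (sketch):

final class ExactMathSketch {
    static int checkedAdd(final int left, final int right) {
        return Math.addExact(left, right); // throws ArithmeticException on int overflow
    }
}

So checkedAdd(Integer.MAX_VALUE, 1) raises ArithmeticException rather than silently yielding Integer.MIN_VALUE.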
private static org.objectweb.asm.commons.Method getAsmMethod(final Class rtype, final String name, final Class... ptypes) { - return new org.objectweb.asm.commons.Method(name, MethodType.methodType(rtype, ptypes).toMethodDescriptorString()); - } - static byte[] write(Metadata metadata) { - Writer writer = new Writer(metadata); + final Writer writer = new Writer(metadata); return writer.getBytes(); } private final Metadata metadata; - private final Definition definition; private final ParseTree root; private final String source; private final CompilerSettings settings; - private final Map branches = new HashMap<>(); - private final Deque jumps = new ArrayDeque<>(); - private final Set strings = new HashSet<>(); + private final ClassWriter writer; + private final GeneratorAdapter execute; - private ClassWriter writer; - private GeneratorAdapter execute; + private final WriterStatement statement; + private final WriterExpression expression; + private final WriterExternal external; private Writer(final Metadata metadata) { this.metadata = metadata; - definition = metadata.definition; root = metadata.root; source = metadata.source; settings = metadata.settings; + writer = new ClassWriter(ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS); + writeBegin(); writeConstructor(); + + execute = new GeneratorAdapter(Opcodes.ACC_PUBLIC, EXECUTE, SIGNATURE, null, writer); + + final WriterUtility utility = new WriterUtility(metadata, execute); + final WriterCaster caster = new WriterCaster(execute); + + statement = new WriterStatement(metadata, execute, this, utility); + expression = new WriterExpression(metadata, execute, this, utility, caster); + external = new WriterExternal(metadata, execute, this, utility, caster); + writeExecute(); writeEnd(); } - private Branch markBranch(final ParserRuleContext source, final ParserRuleContext... nodes) { - final Branch branch = new Branch(source); - - for (final ParserRuleContext node : nodes) { - branches.put(node, branch); - } - - return branch; - } - - private void copyBranch(final Branch branch, final ParserRuleContext... 
nodes) { - for (final ParserRuleContext node : nodes) { - branches.put(node, branch); - } - } - - private Branch getBranch(final ParserRuleContext source) { - return branches.get(source); - } - private void writeBegin() { - final int compute = ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS; final int version = Opcodes.V1_7; final int access = Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER | Opcodes.ACC_FINAL; final String base = BASE_CLASS_TYPE.getInternalName(); final String name = CLASS_TYPE.getInternalName(); - writer = new ClassWriter(compute); writer.visit(version, access, name, null, base, null); writer.visitSource(source, null); } private void writeConstructor() { - final int access = Opcodes.ACC_PUBLIC; - final GeneratorAdapter constructor = new GeneratorAdapter(access, CONSTRUCTOR, null, null, writer); + final GeneratorAdapter constructor = new GeneratorAdapter(Opcodes.ACC_PUBLIC, CONSTRUCTOR, null, null, writer); constructor.loadThis(); constructor.loadArgs(); constructor.invokeConstructor(org.objectweb.asm.Type.getType(Executable.class), CONSTRUCTOR); @@ -338,9 +149,6 @@ class Writer extends PainlessParserBaseVisitor { } private void writeExecute() { - final int access = Opcodes.ACC_PUBLIC; - execute = new GeneratorAdapter(access, EXECUTE, SIGNATURE, null, writer); - final Label fals = new Label(); final Label end = new Label(); execute.visitVarInsn(Opcodes.ALOAD, metadata.inputValueSlot); @@ -364,2011 +172,6 @@ class Writer extends PainlessParserBaseVisitor { execute.endMethod(); } - @Override - public Void visitSource(final SourceContext ctx) { - final Metadata.StatementMetadata sourcesmd = metadata.getStatementMetadata(ctx); - - for (final StatementContext sctx : ctx.statement()) { - visit(sctx); - } - - if (!sourcesmd.methodEscape) { - execute.visitInsn(Opcodes.ACONST_NULL); - execute.returnValue(); - } - - return null; - } - - @Override - public Void visitIf(final IfContext ctx) { - final ExpressionContext exprctx = ctx.expression(); - final boolean els = ctx.ELSE() != null; - final Branch branch = markBranch(ctx, exprctx); - branch.end = new Label(); - branch.fals = els ? new Label() : branch.end; - - visit(exprctx); - - final BlockContext blockctx0 = ctx.block(0); - final Metadata.StatementMetadata blockmd0 = metadata.getStatementMetadata(blockctx0); - visit(blockctx0); - - if (els) { - if (!blockmd0.allLast) { - execute.goTo(branch.end); - } - - execute.mark(branch.fals); - visit(ctx.block(1)); - } - - execute.mark(branch.end); - - return null; - } - - @Override - public Void visitWhile(final WhileContext ctx) { - final ExpressionContext exprctx = ctx.expression(); - final Branch branch = markBranch(ctx, exprctx); - branch.begin = new Label(); - branch.end = new Label(); - branch.fals = branch.end; - - jumps.push(branch); - execute.mark(branch.begin); - visit(exprctx); - - final BlockContext blockctx = ctx.block(); - boolean allLast = false; - - if (blockctx != null) { - final Metadata.StatementMetadata blocksmd = metadata.getStatementMetadata(blockctx); - allLast = blocksmd.allLast; - writeLoopCounter(blocksmd.count > 0 ? 
blocksmd.count : 1); - visit(blockctx); - } else if (ctx.empty() != null) { - writeLoopCounter(1); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - - if (!allLast) { - execute.goTo(branch.begin); - } - - execute.mark(branch.end); - jumps.pop(); - - return null; - } - - @Override - public Void visitDo(final DoContext ctx) { - final ExpressionContext exprctx = ctx.expression(); - final Branch branch = markBranch(ctx, exprctx); - Label start = new Label(); - branch.begin = new Label(); - branch.end = new Label(); - branch.fals = branch.end; - - final BlockContext blockctx = ctx.block(); - final Metadata.StatementMetadata blocksmd = metadata.getStatementMetadata(blockctx); - - jumps.push(branch); - execute.mark(start); - visit(blockctx); - execute.mark(branch.begin); - visit(exprctx); - writeLoopCounter(blocksmd.count > 0 ? blocksmd.count : 1); - execute.goTo(start); - execute.mark(branch.end); - jumps.pop(); - - return null; - } - - @Override - public Void visitFor(final ForContext ctx) { - final ExpressionContext exprctx = ctx.expression(); - final AfterthoughtContext atctx = ctx.afterthought(); - final Branch branch = markBranch(ctx, exprctx); - final Label start = new Label(); - branch.begin = atctx == null ? start : new Label(); - branch.end = new Label(); - branch.fals = branch.end; - - jumps.push(branch); - - if (ctx.initializer() != null) { - visit(ctx.initializer()); - } - - execute.mark(start); - - if (exprctx != null) { - visit(exprctx); - } - - final BlockContext blockctx = ctx.block(); - boolean allLast = false; - - if (blockctx != null) { - Metadata.StatementMetadata blocksmd = metadata.getStatementMetadata(blockctx); - allLast = blocksmd.allLast; - - int count = blocksmd.count > 0 ? blocksmd.count : 1; - - if (atctx != null) { - ++count; - } - - writeLoopCounter(count); - visit(blockctx); - } else if (ctx.empty() != null) { - writeLoopCounter(1); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - - if (atctx != null) { - execute.mark(branch.begin); - visit(atctx); - } - - if (atctx != null || !allLast) { - execute.goTo(start); - } - - execute.mark(branch.end); - jumps.pop(); - - return null; - } - - @Override - public Void visitDecl(final DeclContext ctx) { - visit(ctx.declaration()); - - return null; - } - - @Override - public Void visitContinue(final ContinueContext ctx) { - final Branch jump = jumps.peek(); - execute.goTo(jump.begin); - - return null; - } - - @Override - public Void visitBreak(final BreakContext ctx) { - final Branch jump = jumps.peek(); - execute.goTo(jump.end); - - return null; - } - - @Override - public Void visitReturn(final ReturnContext ctx) { - visit(ctx.expression()); - execute.returnValue(); - - return null; - } - - @Override - public Void visitTry(final TryContext ctx) { - final TrapContext[] trapctxs = new TrapContext[ctx.trap().size()]; - ctx.trap().toArray(trapctxs); - final Branch branch = markBranch(ctx, trapctxs); - - Label end = new Label(); - branch.begin = new Label(); - branch.end = new Label(); - branch.tru = trapctxs.length > 1 ? 
end : null; - - execute.mark(branch.begin); - - final BlockContext blockctx = ctx.block(); - final Metadata.StatementMetadata blocksmd = metadata.getStatementMetadata(blockctx); - visit(blockctx); - - if (!blocksmd.allLast) { - execute.goTo(end); - } - - execute.mark(branch.end); - - for (final TrapContext trapctx : trapctxs) { - visit(trapctx); - } - - if (!blocksmd.allLast || trapctxs.length > 1) { - execute.mark(end); - } - - return null; - } - - @Override - public Void visitThrow(final ThrowContext ctx) { - visit(ctx.expression()); - execute.throwException(); - - return null; - } - - @Override - public Void visitExpr(final ExprContext ctx) { - final Metadata.StatementMetadata exprsmd = metadata.getStatementMetadata(ctx); - final ExpressionContext exprctx = ctx.expression(); - final Metadata.ExpressionMetadata expremd = metadata.getExpressionMetadata(exprctx); - visit(exprctx); - - if (exprsmd.methodEscape) { - execute.returnValue(); - } else { - writePop(expremd.to.type.getSize()); - } - - return null; - } - - @Override - public Void visitMultiple(final MultipleContext ctx) { - for (final StatementContext sctx : ctx.statement()) { - visit(sctx); - } - - return null; - } - - @Override - public Void visitSingle(final SingleContext ctx) { - visit(ctx.statement()); - - return null; - } - - @Override - public Void visitEmpty(final EmptyContext ctx) { - throw new UnsupportedOperationException(Metadata.error(ctx) + "Unexpected writer state."); - } - - @Override - public Void visitInitializer(InitializerContext ctx) { - final DeclarationContext declctx = ctx.declaration(); - final ExpressionContext exprctx = ctx.expression(); - - if (declctx != null) { - visit(declctx); - } else if (exprctx != null) { - final Metadata.ExpressionMetadata expremd = metadata.getExpressionMetadata(exprctx); - visit(exprctx); - writePop(expremd.to.type.getSize()); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - - return null; - } - - @Override - public Void visitAfterthought(AfterthoughtContext ctx) { - final ExpressionContext exprctx = ctx.expression(); - final Metadata.ExpressionMetadata expremd = metadata.getExpressionMetadata(exprctx); - visit(ctx.expression()); - writePop(expremd.to.type.getSize()); - - return null; - } - - @Override - public Void visitDeclaration(DeclarationContext ctx) { - for (final DeclvarContext declctx : ctx.declvar()) { - visit(declctx); - } - - return null; - } - - @Override - public Void visitDecltype(final DecltypeContext ctx) { - throw new UnsupportedOperationException(Metadata.error(ctx) + "Unexpected writer state."); - } - - @Override - public Void visitDeclvar(final DeclvarContext ctx) { - final Metadata.ExpressionMetadata declvaremd = metadata.getExpressionMetadata(ctx); - final org.objectweb.asm.Type type = declvaremd.to.type; - final Sort sort = declvaremd.to.sort; - final int slot = (int)declvaremd.postConst; - - final ExpressionContext exprctx = ctx.expression(); - final boolean initialize = exprctx == null; - - if (!initialize) { - visit(exprctx); - } - - switch (sort) { - case VOID: throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - case BOOL: - case BYTE: - case SHORT: - case CHAR: - case INT: if (initialize) execute.push(0); break; - case LONG: if (initialize) execute.push(0L); break; - case FLOAT: if (initialize) execute.push(0.0F); break; - case DOUBLE: if (initialize) execute.push(0.0); break; - default: if (initialize) execute.visitInsn(Opcodes.ACONST_NULL); - } - - 
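The store emitted just below leans on ASM's opcode translation: Type.getOpcode(Opcodes.ISTORE) maps the generic int-store opcode to the variant matching the value's type, which is why a single line handles every declared type. Sketch:

final class StoreOpcodeSketch {
    static int storeOpcodeFor(final org.objectweb.asm.Type type) {
        // LONG_TYPE yields LSTORE, FLOAT_TYPE yields FSTORE, object types yield ASTORE, and so on.
        return type.getOpcode(org.objectweb.asm.Opcodes.ISTORE);
    }
}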
execute.visitVarInsn(type.getOpcode(Opcodes.ISTORE), slot); - - return null; - } - - @Override - public Void visitTrap(final TrapContext ctx) { - final Metadata.StatementMetadata trapsmd = metadata.getStatementMetadata(ctx); - - final Branch branch = getBranch(ctx); - final Label jump = new Label(); - - final BlockContext blockctx = ctx.block(); - final EmptyscopeContext emptyctx = ctx.emptyscope(); - - execute.mark(jump); - writeLoadStoreVariable(ctx, true, trapsmd.exception, trapsmd.slot); - - if (blockctx != null) { - visit(ctx.block()); - } else if (emptyctx == null) { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - - execute.visitTryCatchBlock(branch.begin, branch.end, jump, trapsmd.exception.type.getInternalName()); - - if (branch.tru != null && !trapsmd.allLast) { - execute.goTo(branch.tru); - } - - return null; - } - - @Override - public Void visitPrecedence(final PrecedenceContext ctx) { - throw new UnsupportedOperationException(Metadata.error(ctx) + "Unexpected writer state."); - } - - @Override - public Void visitNumeric(final NumericContext ctx) { - final Metadata.ExpressionMetadata numericemd = metadata.getExpressionMetadata(ctx); - final Object postConst = numericemd.postConst; - - if (postConst == null) { - writeNumeric(ctx, numericemd.preConst); - checkWriteCast(numericemd); - } else { - writeConstant(ctx, postConst); - } - - checkWriteBranch(ctx); - - return null; - } - - @Override - public Void visitChar(final CharContext ctx) { - final Metadata.ExpressionMetadata charemd = metadata.getExpressionMetadata(ctx); - final Object postConst = charemd.postConst; - - if (postConst == null) { - writeNumeric(ctx, (int)(char)charemd.preConst); - checkWriteCast(charemd); - } else { - writeConstant(ctx, postConst); - } - - checkWriteBranch(ctx); - - return null; - } - - @Override - public Void visitTrue(final TrueContext ctx) { - final Metadata.ExpressionMetadata trueemd = metadata.getExpressionMetadata(ctx); - final Object postConst = trueemd.postConst; - final Branch branch = getBranch(ctx); - - if (branch == null) { - if (postConst == null) { - writeBoolean(ctx, true); - checkWriteCast(trueemd); - } else { - writeConstant(ctx, postConst); - } - } else if (branch.tru != null) { - execute.goTo(branch.tru); - } - - return null; - } - - @Override - public Void visitFalse(final FalseContext ctx) { - final Metadata.ExpressionMetadata falseemd = metadata.getExpressionMetadata(ctx); - final Object postConst = falseemd.postConst; - final Branch branch = getBranch(ctx); - - if (branch == null) { - if (postConst == null) { - writeBoolean(ctx, false); - checkWriteCast(falseemd); - } else { - writeConstant(ctx, postConst); - } - } else if (branch.fals != null) { - execute.goTo(branch.fals); - } - - return null; - } - - @Override - public Void visitNull(final NullContext ctx) { - final Metadata.ExpressionMetadata nullemd = metadata.getExpressionMetadata(ctx); - - execute.visitInsn(Opcodes.ACONST_NULL); - checkWriteCast(nullemd); - checkWriteBranch(ctx); - - return null; - } - - @Override - public Void visitExternal(final ExternalContext ctx) { - final Metadata.ExpressionMetadata expremd = metadata.getExpressionMetadata(ctx); - visit(ctx.extstart()); - checkWriteCast(expremd); - checkWriteBranch(ctx); - - return null; - } - - - @Override - public Void visitPostinc(final PostincContext ctx) { - final Metadata.ExpressionMetadata expremd = metadata.getExpressionMetadata(ctx); - visit(ctx.extstart()); - checkWriteCast(expremd); - checkWriteBranch(ctx); - - 
return null; - } - - @Override - public Void visitPreinc(final PreincContext ctx) { - final Metadata.ExpressionMetadata expremd = metadata.getExpressionMetadata(ctx); - visit(ctx.extstart()); - checkWriteCast(expremd); - checkWriteBranch(ctx); - - return null; - } - - @Override - public Void visitUnary(final UnaryContext ctx) { - final Metadata.ExpressionMetadata unaryemd = metadata.getExpressionMetadata(ctx); - final Object postConst = unaryemd.postConst; - final Object preConst = unaryemd.preConst; - final Branch branch = getBranch(ctx); - - if (postConst != null) { - if (ctx.BOOLNOT() != null) { - if (branch == null) { - writeConstant(ctx, postConst); - } else { - if ((boolean)postConst && branch.tru != null) { - execute.goTo(branch.tru); - } else if (!(boolean)postConst && branch.fals != null) { - execute.goTo(branch.fals); - } - } - } else { - writeConstant(ctx, postConst); - checkWriteBranch(ctx); - } - } else if (preConst != null) { - if (branch == null) { - writeConstant(ctx, preConst); - checkWriteCast(unaryemd); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - } else { - final ExpressionContext exprctx = ctx.expression(); - - if (ctx.BOOLNOT() != null) { - final Branch local = markBranch(ctx, exprctx); - - if (branch == null) { - local.fals = new Label(); - final Label aend = new Label(); - - visit(exprctx); - - execute.push(false); - execute.goTo(aend); - execute.mark(local.fals); - execute.push(true); - execute.mark(aend); - - checkWriteCast(unaryemd); - } else { - local.tru = branch.fals; - local.fals = branch.tru; - - visit(exprctx); - } - } else { - final org.objectweb.asm.Type type = unaryemd.from.type; - final Sort sort = unaryemd.from.sort; - - visit(exprctx); - - if (ctx.BWNOT() != null) { - if (sort == Sort.DEF) { - execute.invokeStatic(definition.defobjType.type, DEF_NOT_CALL); - } else { - if (sort == Sort.INT) { - writeConstant(ctx, -1); - } else if (sort == Sort.LONG) { - writeConstant(ctx, -1L); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - - execute.math(GeneratorAdapter.XOR, type); - } - } else if (ctx.SUB() != null) { - if (sort == Sort.DEF) { - execute.invokeStatic(definition.defobjType.type, DEF_NEG_CALL); - } else { - if (settings.getNumericOverflow()) { - execute.math(GeneratorAdapter.NEG, type); - } else { - if (sort == Sort.INT) { - execute.invokeStatic(definition.mathType.type, NEGATEEXACT_INT); - } else if (sort == Sort.LONG) { - execute.invokeStatic(definition.mathType.type, NEGATEEXACT_LONG); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - } - } - } else if (ctx.ADD() == null) { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - - checkWriteCast(unaryemd); - checkWriteBranch(ctx); - } - } - - return null; - } - - @Override - public Void visitCast(final CastContext ctx) { - final Metadata.ExpressionMetadata castemd = metadata.getExpressionMetadata(ctx); - final Object postConst = castemd.postConst; - - if (postConst == null) { - visit(ctx.expression()); - checkWriteCast(castemd); - } else { - writeConstant(ctx, postConst); - } - - checkWriteBranch(ctx); - - return null; - } - - @Override - public Void visitBinary(final BinaryContext ctx) { - final Metadata.ExpressionMetadata binaryemd = metadata.getExpressionMetadata(ctx); - final Object postConst = binaryemd.postConst; - final Object preConst = binaryemd.preConst; - final Branch branch = getBranch(ctx); 
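The string branch of visitBinary below funnels chained '+' through one shared StringBuilder: the strings set marks subexpressions already feeding an open builder, so "x" + a + b compiles to a single builder rather than one per operator. The emitted bytecode corresponds roughly to this source shape (sketch):

final class StringConcatSketch {
    static String chained(final String x, final int a, final Object b) {
        return new StringBuilder().append(x).append(a).append(b).toString();
    }
}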
- - if (postConst != null) { - writeConstant(ctx, postConst); - } else if (preConst != null) { - if (branch == null) { - writeConstant(ctx, preConst); - checkWriteCast(binaryemd); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - } else if (binaryemd.from.sort == Sort.STRING) { - final boolean marked = strings.contains(ctx); - - if (!marked) { - writeNewStrings(); - } - - final ExpressionContext exprctx0 = ctx.expression(0); - final Metadata.ExpressionMetadata expremd0 = metadata.getExpressionMetadata(exprctx0); - strings.add(exprctx0); - visit(exprctx0); - - if (strings.contains(exprctx0)) { - writeAppendStrings(expremd0.from.sort); - strings.remove(exprctx0); - } - - final ExpressionContext exprctx1 = ctx.expression(1); - final Metadata.ExpressionMetadata expremd1 = metadata.getExpressionMetadata(exprctx1); - strings.add(exprctx1); - visit(exprctx1); - - if (strings.contains(exprctx1)) { - writeAppendStrings(expremd1.from.sort); - strings.remove(exprctx1); - } - - if (marked) { - strings.remove(ctx); - } else { - writeToStrings(); - } - - checkWriteCast(binaryemd); - } else { - final ExpressionContext exprctx0 = ctx.expression(0); - final ExpressionContext exprctx1 = ctx.expression(1); - - visit(exprctx0); - visit(exprctx1); - - final Type type = binaryemd.from; - - if (ctx.MUL() != null) writeBinaryInstruction(ctx, type, MUL); - else if (ctx.DIV() != null) writeBinaryInstruction(ctx, type, DIV); - else if (ctx.REM() != null) writeBinaryInstruction(ctx, type, REM); - else if (ctx.ADD() != null) writeBinaryInstruction(ctx, type, ADD); - else if (ctx.SUB() != null) writeBinaryInstruction(ctx, type, SUB); - else if (ctx.LSH() != null) writeBinaryInstruction(ctx, type, LSH); - else if (ctx.USH() != null) writeBinaryInstruction(ctx, type, USH); - else if (ctx.RSH() != null) writeBinaryInstruction(ctx, type, RSH); - else if (ctx.BWAND() != null) writeBinaryInstruction(ctx, type, BWAND); - else if (ctx.BWXOR() != null) writeBinaryInstruction(ctx, type, BWXOR); - else if (ctx.BWOR() != null) writeBinaryInstruction(ctx, type, BWOR); - else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - - checkWriteCast(binaryemd); - } - - checkWriteBranch(ctx); - - return null; - } - - @Override - public Void visitComp(final CompContext ctx) { - final Metadata.ExpressionMetadata compemd = metadata.getExpressionMetadata(ctx); - final Object postConst = compemd.postConst; - final Object preConst = compemd.preConst; - final Branch branch = getBranch(ctx); - - if (postConst != null) { - if (branch == null) { - writeConstant(ctx, postConst); - } else { - if ((boolean)postConst && branch.tru != null) { - execute.mark(branch.tru); - } else if (!(boolean)postConst && branch.fals != null) { - execute.mark(branch.fals); - } - } - } else if (preConst != null) { - if (branch == null) { - writeConstant(ctx, preConst); - checkWriteCast(compemd); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - } else { - final ExpressionContext exprctx0 = ctx.expression(0); - final Metadata.ExpressionMetadata expremd0 = metadata.getExpressionMetadata(exprctx0); - - final ExpressionContext exprctx1 = ctx.expression(1); - final Metadata.ExpressionMetadata expremd1 = metadata.getExpressionMetadata(exprctx1); - final org.objectweb.asm.Type type = expremd1.to.type; - final Sort sort1 = expremd1.to.sort; - - visit(exprctx0); - - if (!expremd1.isNull) { - visit(exprctx1); - } - - final boolean tru = 
branch != null && branch.tru != null; - final boolean fals = branch != null && branch.fals != null; - final Label jump = tru ? branch.tru : fals ? branch.fals : new Label(); - final Label end = new Label(); - - final boolean eq = (ctx.EQ() != null || ctx.EQR() != null) && (tru || !fals) || - (ctx.NE() != null || ctx.NER() != null) && fals; - final boolean ne = (ctx.NE() != null || ctx.NER() != null) && (tru || !fals) || - (ctx.EQ() != null || ctx.EQR() != null) && fals; - final boolean lt = ctx.LT() != null && (tru || !fals) || ctx.GTE() != null && fals; - final boolean lte = ctx.LTE() != null && (tru || !fals) || ctx.GT() != null && fals; - final boolean gt = ctx.GT() != null && (tru || !fals) || ctx.LTE() != null && fals; - final boolean gte = ctx.GTE() != null && (tru || !fals) || ctx.LT() != null && fals; - - boolean writejump = true; - - switch (sort1) { - case VOID: - case BYTE: - case SHORT: - case CHAR: - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - case BOOL: - if (eq) execute.ifZCmp(GeneratorAdapter.EQ, jump); - else if (ne) execute.ifZCmp(GeneratorAdapter.NE, jump); - else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - - break; - case INT: - case LONG: - case FLOAT: - case DOUBLE: - if (eq) execute.ifCmp(type, GeneratorAdapter.EQ, jump); - else if (ne) execute.ifCmp(type, GeneratorAdapter.NE, jump); - else if (lt) execute.ifCmp(type, GeneratorAdapter.LT, jump); - else if (lte) execute.ifCmp(type, GeneratorAdapter.LE, jump); - else if (gt) execute.ifCmp(type, GeneratorAdapter.GT, jump); - else if (gte) execute.ifCmp(type, GeneratorAdapter.GE, jump); - else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - - break; - case DEF: - if (eq) { - if (expremd1.isNull) { - execute.ifNull(jump); - } else if (!expremd0.isNull && ctx.EQ() != null) { - execute.invokeStatic(definition.defobjType.type, DEF_EQ_CALL); - } else { - execute.ifCmp(type, GeneratorAdapter.EQ, jump); - } - } else if (ne) { - if (expremd1.isNull) { - execute.ifNonNull(jump); - } else if (!expremd0.isNull && ctx.NE() != null) { - execute.invokeStatic(definition.defobjType.type, DEF_EQ_CALL); - execute.ifZCmp(GeneratorAdapter.EQ, jump); - } else { - execute.ifCmp(type, GeneratorAdapter.NE, jump); - } - } else if (lt) { - execute.invokeStatic(definition.defobjType.type, DEF_LT_CALL); - } else if (lte) { - execute.invokeStatic(definition.defobjType.type, DEF_LTE_CALL); - } else if (gt) { - execute.invokeStatic(definition.defobjType.type, DEF_GT_CALL); - } else if (gte) { - execute.invokeStatic(definition.defobjType.type, DEF_GTE_CALL); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - - writejump = expremd1.isNull || ne || ctx.EQR() != null; - - if (branch != null && !writejump) { - execute.ifZCmp(GeneratorAdapter.NE, jump); - } - - break; - default: - if (eq) { - if (expremd1.isNull) { - execute.ifNull(jump); - } else if (ctx.EQ() != null) { - execute.invokeStatic(definition.utilityType.type, CHECKEQUALS); - - if (branch != null) { - execute.ifZCmp(GeneratorAdapter.NE, jump); - } - - writejump = false; - } else { - execute.ifCmp(type, GeneratorAdapter.EQ, jump); - } - } else if (ne) { - if (expremd1.isNull) { - execute.ifNonNull(jump); - } else if (ctx.NE() != null) { - execute.invokeStatic(definition.utilityType.type, CHECKEQUALS); - execute.ifZCmp(GeneratorAdapter.EQ, jump); - } else { - execute.ifCmp(type, GeneratorAdapter.NE, jump); - } - } 
else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - } - - if (branch == null) { - if (writejump) { - execute.push(false); - execute.goTo(end); - execute.mark(jump); - execute.push(true); - execute.mark(end); - } - - checkWriteCast(compemd); - } - } - - return null; - } - - @Override - public Void visitBool(final BoolContext ctx) { - final Metadata.ExpressionMetadata boolemd = metadata.getExpressionMetadata(ctx); - final Object postConst = boolemd.postConst; - final Object preConst = boolemd.preConst; - final Branch branch = getBranch(ctx); - - if (postConst != null) { - if (branch == null) { - writeConstant(ctx, postConst); - } else { - if ((boolean)postConst && branch.tru != null) { - execute.mark(branch.tru); - } else if (!(boolean)postConst && branch.fals != null) { - execute.mark(branch.fals); - } - } - } else if (preConst != null) { - if (branch == null) { - writeConstant(ctx, preConst); - checkWriteCast(boolemd); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - } else { - final ExpressionContext exprctx0 = ctx.expression(0); - final ExpressionContext exprctx1 = ctx.expression(1); - - if (branch == null) { - if (ctx.BOOLAND() != null) { - final Branch local = markBranch(ctx, exprctx0, exprctx1); - local.fals = new Label(); - final Label end = new Label(); - - visit(exprctx0); - visit(exprctx1); - - execute.push(true); - execute.goTo(end); - execute.mark(local.fals); - execute.push(false); - execute.mark(end); - } else if (ctx.BOOLOR() != null) { - final Branch branch0 = markBranch(ctx, exprctx0); - branch0.tru = new Label(); - final Branch branch1 = markBranch(ctx, exprctx1); - branch1.fals = new Label(); - final Label aend = new Label(); - - visit(exprctx0); - visit(exprctx1); - - execute.mark(branch0.tru); - execute.push(true); - execute.goTo(aend); - execute.mark(branch1.fals); - execute.push(false); - execute.mark(aend); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - - checkWriteCast(boolemd); - } else { - if (ctx.BOOLAND() != null) { - final Branch branch0 = markBranch(ctx, exprctx0); - branch0.fals = branch.fals == null ? new Label() : branch.fals; - final Branch branch1 = markBranch(ctx, exprctx1); - branch1.tru = branch.tru; - branch1.fals = branch.fals; - - visit(exprctx0); - visit(exprctx1); - - if (branch.fals == null) { - execute.mark(branch0.fals); - } - } else if (ctx.BOOLOR() != null) { - final Branch branch0 = markBranch(ctx, exprctx0); - branch0.tru = branch.tru == null ? 
new Label() : branch.tru; - final Branch branch1 = markBranch(ctx, exprctx1); - branch1.tru = branch.tru; - branch1.fals = branch.fals; - - visit(exprctx0); - visit(exprctx1); - - if (branch.tru == null) { - execute.mark(branch0.tru); - } - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - } - } - - return null; - } - - @Override - public Void visitConditional(final ConditionalContext ctx) { - final Metadata.ExpressionMetadata condemd = metadata.getExpressionMetadata(ctx); - final Branch branch = getBranch(ctx); - - final ExpressionContext expr0 = ctx.expression(0); - final ExpressionContext expr1 = ctx.expression(1); - final ExpressionContext expr2 = ctx.expression(2); - - final Branch local = markBranch(ctx, expr0); - local.fals = new Label(); - local.end = new Label(); - - if (branch != null) { - copyBranch(branch, expr1, expr2); - } - - visit(expr0); - visit(expr1); - execute.goTo(local.end); - execute.mark(local.fals); - visit(expr2); - execute.mark(local.end); - - if (branch == null) { - checkWriteCast(condemd); - } - - return null; - } - - @Override - public Void visitAssignment(final AssignmentContext ctx) { - final Metadata.ExpressionMetadata expremd = metadata.getExpressionMetadata(ctx); - visit(ctx.extstart()); - checkWriteCast(expremd); - checkWriteBranch(ctx); - - return null; - } - - @Override - public Void visitExtstart(ExtstartContext ctx) { - final Metadata.ExternalMetadata startemd = metadata.getExternalMetadata(ctx); - - if (startemd.token == ADD) { - final Metadata.ExpressionMetadata storeemd = metadata.getExpressionMetadata(startemd.storeExpr); - - if (startemd.current.sort == Sort.STRING || storeemd.from.sort == Sort.STRING) { - writeNewStrings(); - strings.add(startemd.storeExpr); - } - } - - final ExtprecContext precctx = ctx.extprec(); - final ExtcastContext castctx = ctx.extcast(); - final ExttypeContext typectx = ctx.exttype(); - final ExtvarContext varctx = ctx.extvar(); - final ExtnewContext newctx = ctx.extnew(); - final ExtstringContext stringctx = ctx.extstring(); - - if (precctx != null) { - visit(precctx); - } else if (castctx != null) { - visit(castctx); - } else if (typectx != null) { - visit(typectx); - } else if (varctx != null) { - visit(varctx); - } else if (newctx != null) { - visit(newctx); - } else if (stringctx != null) { - visit(stringctx); - } else { - throw new IllegalStateException(); - } - - return null; - } - - @Override - public Void visitExtprec(final ExtprecContext ctx) { - final ExtprecContext precctx = ctx.extprec(); - final ExtcastContext castctx = ctx.extcast(); - final ExttypeContext typectx = ctx.exttype(); - final ExtvarContext varctx = ctx.extvar(); - final ExtnewContext newctx = ctx.extnew(); - final ExtstringContext stringctx = ctx.extstring(); - - if (precctx != null) { - visit(precctx); - } else if (castctx != null) { - visit(castctx); - } else if (typectx != null) { - visit(typectx); - } else if (varctx != null) { - visit(varctx); - } else if (newctx != null) { - visit(newctx); - } else if (stringctx != null) { - visit(stringctx); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - if (dotctx != null) { - visit(dotctx); - } else if (bracectx != null) { - visit(bracectx); - } - - return null; - } - - @Override - public Void visitExtcast(final ExtcastContext ctx) { - Metadata.ExtNodeMetadata castenmd = 
metadata.getExtNodeMetadata(ctx); - - final ExtprecContext precctx = ctx.extprec(); - final ExtcastContext castctx = ctx.extcast(); - final ExttypeContext typectx = ctx.exttype(); - final ExtvarContext varctx = ctx.extvar(); - final ExtnewContext newctx = ctx.extnew(); - final ExtstringContext stringctx = ctx.extstring(); - - if (precctx != null) { - visit(precctx); - } else if (castctx != null) { - visit(castctx); - } else if (typectx != null) { - visit(typectx); - } else if (varctx != null) { - visit(varctx); - } else if (newctx != null) { - visit(newctx); - } else if (stringctx != null) { - visit(stringctx); - } else { - throw new IllegalStateException(Metadata.error(ctx) + "Unexpected writer state."); - } - - checkWriteCast(ctx, castenmd.castTo); - - return null; - } - - @Override - public Void visitExtbrace(final ExtbraceContext ctx) { - final ExpressionContext exprctx = metadata.updateExpressionTree(ctx.expression()); - - visit(exprctx); - writeLoadStoreExternal(ctx); - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - if (dotctx != null) { - visit(dotctx); - } else if (bracectx != null) { - visit(bracectx); - } - - return null; - } - - @Override - public Void visitExtdot(final ExtdotContext ctx) { - final ExtcallContext callctx = ctx.extcall(); - final ExtfieldContext fieldctx = ctx.extfield(); - - if (callctx != null) { - visit(callctx); - } else if (fieldctx != null) { - visit(fieldctx); - } - - return null; - } - - @Override - public Void visitExttype(final ExttypeContext ctx) { - visit(ctx.extdot()); - - return null; - } - - @Override - public Void visitExtcall(final ExtcallContext ctx) { - writeCallExternal(ctx); - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - if (dotctx != null) { - visit(dotctx); - } else if (bracectx != null) { - visit(bracectx); - } - - return null; - } - - @Override - public Void visitExtvar(final ExtvarContext ctx) { - writeLoadStoreExternal(ctx); - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - if (dotctx != null) { - visit(dotctx); - } else if (bracectx != null) { - visit(bracectx); - } - - return null; - } - - @Override - public Void visitExtfield(final ExtfieldContext ctx) { - writeLoadStoreExternal(ctx); - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - if (dotctx != null) { - visit(dotctx); - } else if (bracectx != null) { - visit(bracectx); - } - - return null; - } - - @Override - public Void visitExtnew(ExtnewContext ctx) { - writeNewExternal(ctx); - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - if (dotctx != null) { - visit(dotctx); - } else if (bracectx != null) { - visit(bracectx); - } - - return null; - } - - @Override - public Void visitExtstring(ExtstringContext ctx) { - final Metadata.ExtNodeMetadata stringenmd = metadata.getExtNodeMetadata(ctx); - - writeConstant(ctx, stringenmd.target); - - final ExtdotContext dotctx = ctx.extdot(); - final ExtbraceContext bracectx = ctx.extbrace(); - - if (dotctx != null) { - visit(dotctx); - } else if (bracectx != null) { - visit(bracectx); - } - - return null; - } - - @Override - public Void visitArguments(final ArgumentsContext ctx) { - throw new UnsupportedOperationException(Metadata.error(ctx) + "Unexpected writer state."); - } - - @Override - public Void visitIncrement(IncrementContext ctx) { - final Metadata.ExpressionMetadata incremd = 
metadata.getExpressionMetadata(ctx); - final Object postConst = incremd.postConst; - - if (postConst == null) { - writeNumeric(ctx, incremd.preConst); - checkWriteCast(incremd); - } else { - writeConstant(ctx, postConst); - } - - checkWriteBranch(ctx); - - return null; - } - - private void writeLoopCounter(final int count) { - final Label end = new Label(); - - execute.iinc(metadata.loopCounterSlot, -count); - execute.visitVarInsn(Opcodes.ILOAD, metadata.loopCounterSlot); - execute.push(0); - execute.ifICmp(GeneratorAdapter.GT, end); - execute.throwException(PAINLESS_ERROR_TYPE, - "The maximum number of statements that can be executed in a loop has been reached."); - execute.mark(end); - } - - private void writeConstant(final ParserRuleContext source, final Object constant) { - if (constant instanceof Number) { - writeNumeric(source, constant); - } else if (constant instanceof Character) { - writeNumeric(source, (int)(char)constant); - } else if (constant instanceof String) { - writeString(source, constant); - } else if (constant instanceof Boolean) { - writeBoolean(source, constant); - } else if (constant != null) { - throw new IllegalStateException(Metadata.error(source) + "Unexpected writer state."); - } - } - - private void writeNumeric(final ParserRuleContext source, final Object numeric) { - if (numeric instanceof Double) { - execute.push((double)numeric); - } else if (numeric instanceof Float) { - execute.push((float)numeric); - } else if (numeric instanceof Long) { - execute.push((long)numeric); - } else if (numeric instanceof Number) { - execute.push(((Number)numeric).intValue()); - } else { - throw new IllegalStateException(Metadata.error(source) + "Unexpected writer state."); - } - } - - private void writeString(final ParserRuleContext source, final Object string) { - if (string instanceof String) { - execute.push((String)string); - } else { - throw new IllegalStateException(Metadata.error(source) + "Unexpected writer state."); - } - } - - private void writeBoolean(final ParserRuleContext source, final Object bool) { - if (bool instanceof Boolean) { - execute.push((boolean)bool); - } else { - throw new IllegalStateException(Metadata.error(source) + "Unexpected writer state."); - } - } - - private void writeNewStrings() { - execute.newInstance(STRINGBUILDER_TYPE); - execute.dup(); - execute.invokeConstructor(STRINGBUILDER_TYPE, STRINGBUILDER_CONSTRUCTOR); - } - - private void writeAppendStrings(final Sort sort) { - switch (sort) { - case BOOL: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_BOOLEAN); break; - case CHAR: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_CHAR); break; - case BYTE: - case SHORT: - case INT: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_INT); break; - case LONG: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_LONG); break; - case FLOAT: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_FLOAT); break; - case DOUBLE: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_DOUBLE); break; - case STRING: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_STRING); break; - default: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_OBJECT); - } - } - - private void writeToStrings() { - execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_TOSTRING); - } - - private void writeBinaryInstruction(final ParserRuleContext source, final Type type, final int token) { - final Sort sort = type.sort; - final boolean exact = !settings.getNumericOverflow() && - 
((sort == Sort.INT || sort == Sort.LONG) && - (token == MUL || token == DIV || token == ADD || token == SUB) || - (sort == Sort.FLOAT || sort == Sort.DOUBLE) && - (token == MUL || token == DIV || token == REM || token == ADD || token == SUB)); - - // if its a 64-bit shift, fixup the lastSource argument to truncate to 32-bits - // note unlike java, this means we still do binary promotion of shifts, - // but it keeps things simple -- this check works because we promote shifts. - if (sort == Sort.LONG && (token == LSH || token == USH || token == RSH)) { - execute.cast(org.objectweb.asm.Type.LONG_TYPE, org.objectweb.asm.Type.INT_TYPE); - } - - if (exact) { - switch (sort) { - case INT: - switch (token) { - case MUL: execute.invokeStatic(definition.mathType.type, MULEXACT_INT); break; - case DIV: execute.invokeStatic(definition.utilityType.type, DIVWOOVERLOW_INT); break; - case ADD: execute.invokeStatic(definition.mathType.type, ADDEXACT_INT); break; - case SUB: execute.invokeStatic(definition.mathType.type, SUBEXACT_INT); break; - default: - throw new IllegalStateException(Metadata.error(source) + "Unexpected writer state."); - } - - break; - case LONG: - switch (token) { - case MUL: execute.invokeStatic(definition.mathType.type, MULEXACT_LONG); break; - case DIV: execute.invokeStatic(definition.utilityType.type, DIVWOOVERLOW_LONG); break; - case ADD: execute.invokeStatic(definition.mathType.type, ADDEXACT_LONG); break; - case SUB: execute.invokeStatic(definition.mathType.type, SUBEXACT_LONG); break; - default: - throw new IllegalStateException(Metadata.error(source) + "Unexpected writer state."); - } - - break; - case FLOAT: - switch (token) { - case MUL: execute.invokeStatic(definition.utilityType.type, MULWOOVERLOW_FLOAT); break; - case DIV: execute.invokeStatic(definition.utilityType.type, DIVWOOVERLOW_FLOAT); break; - case REM: execute.invokeStatic(definition.utilityType.type, REMWOOVERLOW_FLOAT); break; - case ADD: execute.invokeStatic(definition.utilityType.type, ADDWOOVERLOW_FLOAT); break; - case SUB: execute.invokeStatic(definition.utilityType.type, SUBWOOVERLOW_FLOAT); break; - default: - throw new IllegalStateException(Metadata.error(source) + "Unexpected writer state."); - } - - break; - case DOUBLE: - switch (token) { - case MUL: execute.invokeStatic(definition.utilityType.type, MULWOOVERLOW_DOUBLE); break; - case DIV: execute.invokeStatic(definition.utilityType.type, DIVWOOVERLOW_DOUBLE); break; - case REM: execute.invokeStatic(definition.utilityType.type, REMWOOVERLOW_DOUBLE); break; - case ADD: execute.invokeStatic(definition.utilityType.type, ADDWOOVERLOW_DOUBLE); break; - case SUB: execute.invokeStatic(definition.utilityType.type, SUBWOOVERLOW_DOUBLE); break; - default: - throw new IllegalStateException(Metadata.error(source) + "Unexpected writer state."); - } - - break; - default: - throw new IllegalStateException(Metadata.error(source) + "Unexpected writer state."); - } - } else { - if ((sort == Sort.FLOAT || sort == Sort.DOUBLE) && - (token == LSH || token == USH || token == RSH || token == BWAND || token == BWXOR || token == BWOR)) { - throw new IllegalStateException(Metadata.error(source) + "Unexpected writer state."); - } - - if (sort == Sort.DEF) { - switch (token) { - case MUL: execute.invokeStatic(definition.defobjType.type, DEF_MUL_CALL); break; - case DIV: execute.invokeStatic(definition.defobjType.type, DEF_DIV_CALL); break; - case REM: execute.invokeStatic(definition.defobjType.type, DEF_REM_CALL); break; - case ADD: 
execute.invokeStatic(definition.defobjType.type, DEF_ADD_CALL); break; - case SUB: execute.invokeStatic(definition.defobjType.type, DEF_SUB_CALL); break; - case LSH: execute.invokeStatic(definition.defobjType.type, DEF_LSH_CALL); break; - case USH: execute.invokeStatic(definition.defobjType.type, DEF_RSH_CALL); break; - case RSH: execute.invokeStatic(definition.defobjType.type, DEF_USH_CALL); break; - case BWAND: execute.invokeStatic(definition.defobjType.type, DEF_AND_CALL); break; - case BWXOR: execute.invokeStatic(definition.defobjType.type, DEF_XOR_CALL); break; - case BWOR: execute.invokeStatic(definition.defobjType.type, DEF_OR_CALL); break; - default: - throw new IllegalStateException(Metadata.error(source) + "Unexpected writer state."); - } - } else { - switch (token) { - case MUL: execute.math(GeneratorAdapter.MUL, type.type); break; - case DIV: execute.math(GeneratorAdapter.DIV, type.type); break; - case REM: execute.math(GeneratorAdapter.REM, type.type); break; - case ADD: execute.math(GeneratorAdapter.ADD, type.type); break; - case SUB: execute.math(GeneratorAdapter.SUB, type.type); break; - case LSH: execute.math(GeneratorAdapter.SHL, type.type); break; - case USH: execute.math(GeneratorAdapter.USHR, type.type); break; - case RSH: execute.math(GeneratorAdapter.SHR, type.type); break; - case BWAND: execute.math(GeneratorAdapter.AND, type.type); break; - case BWXOR: execute.math(GeneratorAdapter.XOR, type.type); break; - case BWOR: execute.math(GeneratorAdapter.OR, type.type); break; - default: - throw new IllegalStateException(Metadata.error(source) + "Unexpected writer state."); - } - } - } - } - - /** - * Called for any compound assignment (including increment/decrement instructions). - * We have to be stricter than writeBinary, and do overflow checks against the original type's size - * instead of the promoted type's size, since the result will be implicitly cast back. 
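- * For example, a compound assignment on a byte is promoted to int for the
- * arithmetic, so the overflow check must be made against byte's range, not
- * int's, before the result is narrowed back down to byte.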
- * - * @return true if an instruction is written, false otherwise - */ - private boolean writeExactInstruction(final Sort osort, final Sort psort) { - if (psort == Sort.DOUBLE) { - if (osort == Sort.FLOAT) { - execute.invokeStatic(definition.utilityType.type, TOFLOATWOOVERFLOW_DOUBLE); - } else if (osort == Sort.FLOAT_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOFLOATWOOVERFLOW_DOUBLE); - execute.checkCast(definition.floatobjType.type); - } else if (osort == Sort.LONG) { - execute.invokeStatic(definition.utilityType.type, TOLONGWOOVERFLOW_DOUBLE); - } else if (osort == Sort.LONG_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOLONGWOOVERFLOW_DOUBLE); - execute.checkCast(definition.longobjType.type); - } else if (osort == Sort.INT) { - execute.invokeStatic(definition.utilityType.type, TOINTWOOVERFLOW_DOUBLE); - } else if (osort == Sort.INT_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOINTWOOVERFLOW_DOUBLE); - execute.checkCast(definition.intobjType.type); - } else if (osort == Sort.CHAR) { - execute.invokeStatic(definition.utilityType.type, TOCHARWOOVERFLOW_DOUBLE); - } else if (osort == Sort.CHAR_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOCHARWOOVERFLOW_DOUBLE); - execute.checkCast(definition.charobjType.type); - } else if (osort == Sort.SHORT) { - execute.invokeStatic(definition.utilityType.type, TOSHORTWOOVERFLOW_DOUBLE); - } else if (osort == Sort.SHORT_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOSHORTWOOVERFLOW_DOUBLE); - execute.checkCast(definition.shortobjType.type); - } else if (osort == Sort.BYTE) { - execute.invokeStatic(definition.utilityType.type, TOBYTEWOOVERFLOW_DOUBLE); - } else if (osort == Sort.BYTE_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOBYTEWOOVERFLOW_DOUBLE); - execute.checkCast(definition.byteobjType.type); - } else { - return false; - } - } else if (psort == Sort.FLOAT) { - if (osort == Sort.LONG) { - execute.invokeStatic(definition.utilityType.type, TOLONGWOOVERFLOW_FLOAT); - } else if (osort == Sort.LONG_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOLONGWOOVERFLOW_FLOAT); - execute.checkCast(definition.longobjType.type); - } else if (osort == Sort.INT) { - execute.invokeStatic(definition.utilityType.type, TOINTWOOVERFLOW_FLOAT); - } else if (osort == Sort.INT_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOINTWOOVERFLOW_FLOAT); - execute.checkCast(definition.intobjType.type); - } else if (osort == Sort.CHAR) { - execute.invokeStatic(definition.utilityType.type, TOCHARWOOVERFLOW_FLOAT); - } else if (osort == Sort.CHAR_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOCHARWOOVERFLOW_FLOAT); - execute.checkCast(definition.charobjType.type); - } else if (osort == Sort.SHORT) { - execute.invokeStatic(definition.utilityType.type, TOSHORTWOOVERFLOW_FLOAT); - } else if (osort == Sort.SHORT_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOSHORTWOOVERFLOW_FLOAT); - execute.checkCast(definition.shortobjType.type); - } else if (osort == Sort.BYTE) { - execute.invokeStatic(definition.utilityType.type, TOBYTEWOOVERFLOW_FLOAT); - } else if (osort == Sort.BYTE_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOBYTEWOOVERFLOW_FLOAT); - execute.checkCast(definition.byteobjType.type); - } else { - return false; - } - } else if (psort == Sort.LONG) { - if (osort == Sort.INT) { - execute.invokeStatic(definition.mathType.type, TOINTEXACT_LONG); - } else if (osort == Sort.INT_OBJ) { - execute.invokeStatic(definition.mathType.type, 
TOINTEXACT_LONG); - execute.checkCast(definition.intobjType.type); - } else if (osort == Sort.CHAR) { - execute.invokeStatic(definition.utilityType.type, TOCHAREXACT_LONG); - } else if (osort == Sort.CHAR_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOCHAREXACT_LONG); - execute.checkCast(definition.charobjType.type); - } else if (osort == Sort.SHORT) { - execute.invokeStatic(definition.utilityType.type, TOSHORTEXACT_LONG); - } else if (osort == Sort.SHORT_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOSHORTEXACT_LONG); - execute.checkCast(definition.shortobjType.type); - } else if (osort == Sort.BYTE) { - execute.invokeStatic(definition.utilityType.type, TOBYTEEXACT_LONG); - } else if (osort == Sort.BYTE_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOBYTEEXACT_LONG); - execute.checkCast(definition.byteobjType.type); - } else { - return false; - } - } else if (psort == Sort.INT) { - if (osort == Sort.CHAR) { - execute.invokeStatic(definition.utilityType.type, TOCHAREXACT_INT); - } else if (osort == Sort.CHAR_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOCHAREXACT_INT); - execute.checkCast(definition.charobjType.type); - } else if (osort == Sort.SHORT) { - execute.invokeStatic(definition.utilityType.type, TOSHORTEXACT_INT); - } else if (osort == Sort.SHORT_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOSHORTEXACT_INT); - execute.checkCast(definition.shortobjType.type); - } else if (osort == Sort.BYTE) { - execute.invokeStatic(definition.utilityType.type, TOBYTEEXACT_INT); - } else if (osort == Sort.BYTE_OBJ) { - execute.invokeStatic(definition.utilityType.type, TOBYTEEXACT_INT); - execute.checkCast(definition.byteobjType.type); - } else { - return false; - } - } else { - return false; - } - - return true; - } - - private void writeLoadStoreExternal(final ParserRuleContext source) { - final Metadata.ExtNodeMetadata sourceenmd = metadata.getExtNodeMetadata(source); - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(sourceenmd.parent); - - final boolean length = "#length".equals(sourceenmd.target); - final boolean array = "#brace".equals(sourceenmd.target); - final boolean name = sourceenmd.target instanceof String && !length && !array; - final boolean variable = sourceenmd.target instanceof Integer; - final boolean field = sourceenmd.target instanceof Field; - final boolean shortcut = sourceenmd.target instanceof Object[]; - - if (!length && !variable && !field && !array && !name && !shortcut) { - throw new IllegalStateException(Metadata.error(source) + "Target not found for load/store."); - } - - final boolean maplist = shortcut && (boolean)((Object[])sourceenmd.target)[2]; - final Object constant = shortcut ? 
((Object[])sourceenmd.target)[3] : null; - - final boolean x1 = field || name || (shortcut && !maplist); - final boolean x2 = array || (shortcut && maplist); - - if (length) { - execute.arrayLength(); - } else if (sourceenmd.last && parentemd.storeExpr != null) { - final Metadata.ExpressionMetadata expremd = metadata.getExpressionMetadata(parentemd.storeExpr); - final boolean cat = strings.contains(parentemd.storeExpr); - - if (cat) { - if (field || name || shortcut) { - execute.dupX1(); - } else if (array) { - execute.dup2X1(); - } - - if (maplist) { - if (constant != null) { - writeConstant(source, constant); - } - - execute.dupX2(); - } - - writeLoadStoreInstruction(source, false, variable, field, name, array, shortcut); - writeAppendStrings(sourceenmd.type.sort); - visit(parentemd.storeExpr); - - if (strings.contains(parentemd.storeExpr)) { - writeAppendStrings(expremd.to.sort); - strings.remove(parentemd.storeExpr); - } - - writeToStrings(); - checkWriteCast(source, sourceenmd.castTo); - - if (parentemd.read) { - writeDup(sourceenmd.type.sort.size, x1, x2); - } - - writeLoadStoreInstruction(source, true, variable, field, name, array, shortcut); - } else if (parentemd.token > 0) { - final int token = parentemd.token; - - if (field || name || shortcut) { - execute.dup(); - } else if (array) { - execute.dup2(); - } - - if (maplist) { - if (constant != null) { - writeConstant(source, constant); - } - - execute.dupX1(); - } - - writeLoadStoreInstruction(source, false, variable, field, name, array, shortcut); - - if (parentemd.read && parentemd.post) { - writeDup(sourceenmd.type.sort.size, x1, x2); - } - - checkWriteCast(source, sourceenmd.castFrom); - visit(parentemd.storeExpr); - - writeBinaryInstruction(source, sourceenmd.promote, token); - - boolean exact = false; - - if (!settings.getNumericOverflow() && expremd.typesafe && sourceenmd.type.sort != Sort.DEF && - (token == MUL || token == DIV || token == REM || token == ADD || token == SUB)) { - exact = writeExactInstruction(sourceenmd.type.sort, sourceenmd.promote.sort); - } - - if (!exact) { - checkWriteCast(source, sourceenmd.castTo); - } - - if (parentemd.read && !parentemd.post) { - writeDup(sourceenmd.type.sort.size, x1, x2); - } - - writeLoadStoreInstruction(source, true, variable, field, name, array, shortcut); - } else { - if (constant != null) { - writeConstant(source, constant); - } - - visit(parentemd.storeExpr); - - if (parentemd.read) { - writeDup(sourceenmd.type.sort.size, x1, x2); - } - - writeLoadStoreInstruction(source, true, variable, field, name, array, shortcut); - } - } else { - if (constant != null) { - writeConstant(source, constant); - } - - writeLoadStoreInstruction(source, false, variable, field, name, array, shortcut); - } - } - - private void writeLoadStoreInstruction(final ParserRuleContext source, - final boolean store, final boolean variable, - final boolean field, final boolean name, - final boolean array, final boolean shortcut) { - final Metadata.ExtNodeMetadata sourceemd = metadata.getExtNodeMetadata(source); - - if (variable) { - writeLoadStoreVariable(source, store, sourceemd.type, (int)sourceemd.target); - } else if (field) { - writeLoadStoreField(store, (Field)sourceemd.target); - } else if (name) { - writeLoadStoreField(source, store, (String)sourceemd.target); - } else if (array) { - writeLoadStoreArray(source, store, sourceemd.type); - } else if (shortcut) { - Object[] targets = (Object[])sourceemd.target; - writeLoadStoreShortcut(store, (Method)targets[0], (Method)targets[1]); - } else { - 
throw new IllegalStateException(Metadata.error(source) + "Load/Store requires a variable, field, or array."); - } - } - - private void writeLoadStoreVariable(final ParserRuleContext source, final boolean store, - final Type type, final int slot) { - if (type.sort == Sort.VOID) { - throw new IllegalStateException(Metadata.error(source) + "Cannot load/store void type."); - } - - if (store) { - execute.visitVarInsn(type.type.getOpcode(Opcodes.ISTORE), slot); - } else { - execute.visitVarInsn(type.type.getOpcode(Opcodes.ILOAD), slot); - } - } - - private void writeLoadStoreField(final boolean store, final Field field) { - if (java.lang.reflect.Modifier.isStatic(field.reflect.getModifiers())) { - if (store) { - execute.putStatic(field.owner.type, field.reflect.getName(), field.type.type); - } else { - execute.getStatic(field.owner.type, field.reflect.getName(), field.type.type); - - if (!field.generic.clazz.equals(field.type.clazz)) { - execute.checkCast(field.generic.type); - } - } - } else { - if (store) { - execute.putField(field.owner.type, field.reflect.getName(), field.type.type); - } else { - execute.getField(field.owner.type, field.reflect.getName(), field.type.type); - - if (!field.generic.clazz.equals(field.type.clazz)) { - execute.checkCast(field.generic.type); - } - } - } - } - - private void writeLoadStoreField(final ParserRuleContext source, final boolean store, final String name) { - if (store) { - final Metadata.ExtNodeMetadata sourceemd = metadata.getExtNodeMetadata(source); - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(sourceemd.parent); - final Metadata.ExpressionMetadata expremd = metadata.getExpressionMetadata(parentemd.storeExpr); - - execute.push(name); - execute.loadThis(); - execute.getField(CLASS_TYPE, "definition", DEFINITION_TYPE); - execute.push(parentemd.token == 0 && expremd.typesafe); - execute.invokeStatic(definition.defobjType.type, DEF_FIELD_STORE); - } else { - execute.push(name); - execute.loadThis(); - execute.getField(CLASS_TYPE, "definition", DEFINITION_TYPE); - execute.invokeStatic(definition.defobjType.type, DEF_FIELD_LOAD); - } - } - - private void writeLoadStoreArray(final ParserRuleContext source, final boolean store, final Type type) { - if (type.sort == Sort.VOID) { - throw new IllegalStateException(Metadata.error(source) + "Cannot load/store void type."); - } - - if (type.sort == Sort.DEF) { - final ExtbraceContext bracectx = (ExtbraceContext)source; - final Metadata.ExpressionMetadata expremd0 = metadata.getExpressionMetadata(bracectx.expression()); - - if (store) { - final Metadata.ExtNodeMetadata braceenmd = metadata.getExtNodeMetadata(bracectx); - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(braceenmd.parent); - final Metadata.ExpressionMetadata expremd1 = metadata.getExpressionMetadata(parentemd.storeExpr); - - execute.loadThis(); - execute.getField(CLASS_TYPE, "definition", DEFINITION_TYPE); - execute.push(expremd0.typesafe); - execute.push(parentemd.token == 0 && expremd1.typesafe); - execute.invokeStatic(definition.defobjType.type, DEF_ARRAY_STORE); - } else { - execute.loadThis(); - execute.getField(CLASS_TYPE, "definition", DEFINITION_TYPE); - execute.push(expremd0.typesafe); - execute.invokeStatic(definition.defobjType.type, DEF_ARRAY_LOAD); - } - } else { - if (store) { - execute.arrayStore(type.type); - } else { - execute.arrayLoad(type.type); - } - } - } - - private void writeLoadStoreShortcut(final boolean store, final Method getter, final Method setter) { - final Method 
method = store ? setter : getter; - - if (java.lang.reflect.Modifier.isInterface(getter.owner.clazz.getModifiers())) { - execute.invokeInterface(method.owner.type, method.method); - } else { - execute.invokeVirtual(method.owner.type, method.method); - } - - if (store) { - writePop(method.rtn.type.getSize()); - } else if (!method.rtn.clazz.equals(method.handle.type().returnType())) { - execute.checkCast(method.rtn.type); - } - } - - private void writeDup(final int size, final boolean x1, final boolean x2) { - if (size == 1) { - if (x2) { - execute.dupX2(); - } else if (x1) { - execute.dupX1(); - } else { - execute.dup(); - } - } else if (size == 2) { - if (x2) { - execute.dup2X2(); - } else if (x1) { - execute.dup2X1(); - } else { - execute.dup2(); - } - } - } - - private void writeNewExternal(final ExtnewContext source) { - final Metadata.ExtNodeMetadata sourceenmd = metadata.getExtNodeMetadata(source); - final Metadata.ExternalMetadata parentemd = metadata.getExternalMetadata(sourceenmd.parent); - - final boolean makearray = "#makearray".equals(sourceenmd.target); - final boolean constructor = sourceenmd.target instanceof Constructor; - - if (!makearray && !constructor) { - throw new IllegalStateException(Metadata.error(source) + "Target not found for new call."); - } - - if (makearray) { - for (final ExpressionContext exprctx : source.expression()) { - visit(exprctx); - } - - if (sourceenmd.type.sort == Sort.ARRAY) { - execute.visitMultiANewArrayInsn(sourceenmd.type.type.getDescriptor(), sourceenmd.type.type.getDimensions()); - } else { - execute.newArray(sourceenmd.type.type); - } - } else { - execute.newInstance(sourceenmd.type.type); - - if (parentemd.read) { - execute.dup(); - } - - for (final ExpressionContext exprctx : source.arguments().expression()) { - visit(exprctx); - } - - final Constructor target = (Constructor)sourceenmd.target; - execute.invokeConstructor(target.owner.type, target.method); - } - } - - private void writeCallExternal(final ExtcallContext source) { - final Metadata.ExtNodeMetadata sourceenmd = metadata.getExtNodeMetadata(source); - - final boolean method = sourceenmd.target instanceof Method; - final boolean def = sourceenmd.target instanceof String; - - if (!method && !def) { - throw new IllegalStateException(Metadata.error(source) + "Target not found for call."); - } - - final List arguments = source.arguments().expression(); - - if (method) { - for (final ExpressionContext exprctx : arguments) { - visit(exprctx); - } - - final Method target = (Method)sourceenmd.target; - - if (java.lang.reflect.Modifier.isStatic(target.reflect.getModifiers())) { - execute.invokeStatic(target.owner.type, target.method); - } else if (java.lang.reflect.Modifier.isInterface(target.owner.clazz.getModifiers())) { - execute.invokeInterface(target.owner.type, target.method); - } else { - execute.invokeVirtual(target.owner.type, target.method); - } - - if (!target.rtn.clazz.equals(target.handle.type().returnType())) { - execute.checkCast(target.rtn.type); - } - } else { - execute.push((String)sourceenmd.target); - execute.loadThis(); - execute.getField(CLASS_TYPE, "definition", DEFINITION_TYPE); - - execute.push(arguments.size()); - execute.newArray(definition.defType.type); - - for (int argument = 0; argument < arguments.size(); ++argument) { - execute.dup(); - execute.push(argument); - visit(arguments.get(argument)); - execute.arrayStore(definition.defType.type); - } - - execute.push(arguments.size()); - execute.newArray(definition.booleanType.type); - - for (int argument = 0; 
argument < arguments.size(); ++argument) { - execute.dup(); - execute.push(argument); - execute.push(metadata.getExpressionMetadata(arguments.get(argument)).typesafe); - execute.arrayStore(definition.booleanType.type); - } - - execute.invokeStatic(definition.defobjType.type, DEF_METHOD_CALL); - } - } - - private void writePop(final int size) { - if (size == 1) { - execute.pop(); - } else if (size == 2) { - execute.pop2(); - } - } - - private void checkWriteCast(final Metadata.ExpressionMetadata sort) { - checkWriteCast(sort.source, sort.cast); - } - - private void checkWriteCast(final ParserRuleContext source, final Cast cast) { - if (cast instanceof Transform) { - writeTransform((Transform)cast); - } else if (cast != null) { - writeCast(cast); - } else { - throw new IllegalStateException(Metadata.error(source) + "Unexpected cast object."); - } - } - - private void writeCast(final Cast cast) { - final Type from = cast.from; - final Type to = cast.to; - - if (from.equals(to)) { - return; - } - - if (from.sort.numeric && from.sort.primitive && to.sort.numeric && to.sort.primitive) { - execute.cast(from.type, to.type); - } else { - try { - from.clazz.asSubclass(to.clazz); - } catch (ClassCastException exception) { - execute.checkCast(to.type); - } - } - } - - private void writeTransform(final Transform transform) { - if (transform.upcast != null) { - execute.checkCast(transform.upcast.type); - } - - if (java.lang.reflect.Modifier.isStatic(transform.method.reflect.getModifiers())) { - execute.invokeStatic(transform.method.owner.type, transform.method.method); - } else if (java.lang.reflect.Modifier.isInterface(transform.method.owner.clazz.getModifiers())) { - execute.invokeInterface(transform.method.owner.type, transform.method.method); - } else { - execute.invokeVirtual(transform.method.owner.type, transform.method.method); - } - - if (transform.downcast != null) { - execute.checkCast(transform.downcast.type); - } - } - - void checkWriteBranch(final ParserRuleContext source) { - final Branch branch = getBranch(source); - - if (branch != null) { - if (branch.tru != null) { - execute.visitJumpInsn(Opcodes.IFNE, branch.tru); - } else if (branch.fals != null) { - execute.visitJumpInsn(Opcodes.IFEQ, branch.fals); - } - } - } - private void writeEnd() { writer.visitEnd(); } @@ -2376,4 +179,352 @@ class Writer extends PainlessParserBaseVisitor { private byte[] getBytes() { return writer.toByteArray(); } + + @Override + public Void visitSource(final SourceContext ctx) { + statement.processSource(ctx); + + return null; + } + + @Override + public Void visitIf(final IfContext ctx) { + statement.processIf(ctx); + + return null; + } + + @Override + public Void visitWhile(final WhileContext ctx) { + statement.processWhile(ctx); + + return null; + } + + @Override + public Void visitDo(final DoContext ctx) { + statement.processDo(ctx); + + return null; + } + + @Override + public Void visitFor(final ForContext ctx) { + statement.processFor(ctx); + + return null; + } + + @Override + public Void visitDecl(final DeclContext ctx) { + statement.processDecl(ctx); + + return null; + } + + @Override + public Void visitContinue(final ContinueContext ctx) { + statement.processContinue(); + + return null; + } + + @Override + public Void visitBreak(final BreakContext ctx) { + statement.processBreak(); + + return null; + } + + @Override + public Void visitReturn(final ReturnContext ctx) { + statement.processReturn(ctx); + + return null; + } + + @Override + public Void visitTry(final TryContext ctx) { + 
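+ // Like the other statement nodes, try/catch lowering is delegated to
+ // the statement writer.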
statement.processTry(ctx); + + return null; + } + + @Override + public Void visitThrow(final ThrowContext ctx) { + statement.processThrow(ctx); + + return null; + } + + @Override + public Void visitExpr(final ExprContext ctx) { + statement.processExpr(ctx); + + return null; + } + + @Override + public Void visitMultiple(final MultipleContext ctx) { + statement.processMultiple(ctx); + + return null; + } + + @Override + public Void visitSingle(final SingleContext ctx) { + statement.processSingle(ctx); + + return null; + } + + @Override + public Void visitEmpty(final EmptyContext ctx) { + throw new UnsupportedOperationException(WriterUtility.error(ctx) + "Unexpected state."); + } + + @Override + public Void visitEmptyscope(final EmptyscopeContext ctx) { + throw new UnsupportedOperationException(WriterUtility.error(ctx) + "Unexpected state."); + } + + @Override + public Void visitInitializer(final InitializerContext ctx) { + statement.processInitializer(ctx); + + return null; + } + + @Override + public Void visitAfterthought(final AfterthoughtContext ctx) { + statement.processAfterthought(ctx); + + return null; + } + + @Override + public Void visitDeclaration(DeclarationContext ctx) { + statement.processDeclaration(ctx); + + return null; + } + + @Override + public Void visitDecltype(final DecltypeContext ctx) { + throw new UnsupportedOperationException(WriterUtility.error(ctx) + "Unexpected state."); + } + + @Override + public Void visitDeclvar(final DeclvarContext ctx) { + statement.processDeclvar(ctx); + + return null; + } + + @Override + public Void visitTrap(final TrapContext ctx) { + statement.processTrap(ctx); + + return null; + } + + @Override + public Void visitPrecedence(final PrecedenceContext ctx) { + throw new UnsupportedOperationException(WriterUtility.error(ctx) + "Unexpected state."); + } + + @Override + public Void visitNumeric(final NumericContext ctx) { + expression.processNumeric(ctx); + + return null; + } + + @Override + public Void visitChar(final CharContext ctx) { + expression.processChar(ctx); + + return null; + } + + @Override + public Void visitTrue(final TrueContext ctx) { + expression.processTrue(ctx); + + return null; + } + + @Override + public Void visitFalse(final FalseContext ctx) { + expression.processFalse(ctx); + + return null; + } + + @Override + public Void visitNull(final NullContext ctx) { + expression.processNull(ctx); + + return null; + } + + @Override + public Void visitExternal(final ExternalContext ctx) { + expression.processExternal(ctx); + + return null; + } + + + @Override + public Void visitPostinc(final PostincContext ctx) { + expression.processPostinc(ctx); + + return null; + } + + @Override + public Void visitPreinc(final PreincContext ctx) { + expression.processPreinc(ctx); + + return null; + } + + @Override + public Void visitUnary(final UnaryContext ctx) { + expression.processUnary(ctx); + + return null; + } + + @Override + public Void visitCast(final CastContext ctx) { + expression.processCast(ctx); + + return null; + } + + @Override + public Void visitBinary(final BinaryContext ctx) { + expression.processBinary(ctx); + + return null; + } + + @Override + public Void visitComp(final CompContext ctx) { + expression.processComp(ctx); + + return null; + } + + @Override + public Void visitBool(final BoolContext ctx) { + expression.processBool(ctx); + + return null; + } + + @Override + public Void visitConditional(final ConditionalContext ctx) { + expression.processConditional(ctx); + + return null; + } + + @Override + public Void 
visitAssignment(final AssignmentContext ctx) { + expression.processAssignment(ctx); + + return null; + } + + @Override + public Void visitExtstart(final ExtstartContext ctx) { + external.processExtstart(ctx); + + return null; + } + + @Override + public Void visitExtprec(final ExtprecContext ctx) { + external.processExtprec(ctx); + + return null; + } + + @Override + public Void visitExtcast(final ExtcastContext ctx) { + external.processExtcast(ctx); + + return null; + } + + @Override + public Void visitExtbrace(final ExtbraceContext ctx) { + external.processExtbrace(ctx); + + return null; + } + + @Override + public Void visitExtdot(final ExtdotContext ctx) { + external.processExtdot(ctx); + + return null; + } + + @Override + public Void visitExttype(final ExttypeContext ctx) { + external.processExttype(ctx); + + return null; + } + + @Override + public Void visitExtcall(final ExtcallContext ctx) { + external.processExtcall(ctx); + + return null; + } + + @Override + public Void visitExtvar(final ExtvarContext ctx) { + external.processExtvar(ctx); + + return null; + } + + @Override + public Void visitExtfield(final ExtfieldContext ctx) { + external.processExtfield(ctx); + + return null; + } + + @Override + public Void visitExtnew(final ExtnewContext ctx) { + external.processExtnew(ctx); + + return null; + } + + @Override + public Void visitExtstring(final ExtstringContext ctx) { + external.processExtstring(ctx); + + return null; + } + + @Override + public Void visitArguments(final ArgumentsContext ctx) { + throw new UnsupportedOperationException(WriterUtility.error(ctx) + "Unexpected state."); + } + + @Override + public Void visitIncrement(final IncrementContext ctx) { + expression.processIncrement(ctx); + + return null; + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterCaster.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterCaster.java new file mode 100644 index 00000000000..c55dff2f549 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterCaster.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.elasticsearch.painless.Definition.Cast; +import org.elasticsearch.painless.Definition.Transform; +import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.Metadata.ExpressionMetadata; +import org.objectweb.asm.commons.GeneratorAdapter; + +class WriterCaster { + private final GeneratorAdapter execute; + + WriterCaster(final GeneratorAdapter execute) { + this.execute = execute; + } + + void checkWriteCast(final ExpressionMetadata sort) { + checkWriteCast(sort.source, sort.cast); + } + + void checkWriteCast(final ParserRuleContext source, final Cast cast) { + if (cast instanceof Transform) { + writeTransform((Transform)cast); + } else if (cast != null) { + writeCast(cast); + } else { + throw new IllegalStateException(WriterUtility.error(source) + "Unexpected cast object."); + } + } + + private void writeCast(final Cast cast) { + final Type from = cast.from; + final Type to = cast.to; + + if (from.equals(to)) { + return; + } + + if (from.sort.numeric && from.sort.primitive && to.sort.numeric && to.sort.primitive) { + execute.cast(from.type, to.type); + } else { + try { + from.clazz.asSubclass(to.clazz); + } catch (ClassCastException exception) { + execute.checkCast(to.type); + } + } + } + + private void writeTransform(final Transform transform) { + if (transform.upcast != null) { + execute.checkCast(transform.upcast.type); + } + + if (java.lang.reflect.Modifier.isStatic(transform.method.reflect.getModifiers())) { + execute.invokeStatic(transform.method.owner.type, transform.method.method); + } else if (java.lang.reflect.Modifier.isInterface(transform.method.owner.clazz.getModifiers())) { + execute.invokeInterface(transform.method.owner.type, transform.method.method); + } else { + execute.invokeVirtual(transform.method.owner.type, transform.method.method); + } + + if (transform.downcast != null) { + execute.checkCast(transform.downcast.type); + } + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java new file mode 100644 index 00000000000..33fea094058 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java @@ -0,0 +1,138 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless; + +import org.elasticsearch.script.ScoreAccessor; +import org.objectweb.asm.Type; +import org.objectweb.asm.commons.Method; + +import java.lang.invoke.MethodType; +import java.util.Map; + +class WriterConstants { + final static String BASE_CLASS_NAME = Executable.class.getName(); + final static String CLASS_NAME = BASE_CLASS_NAME + "$CompiledPainlessExecutable"; + final static Type BASE_CLASS_TYPE = Type.getType(Executable.class); + final static Type CLASS_TYPE = Type.getType("L" + CLASS_NAME.replace(".", "/") + ";"); + + final static Method CONSTRUCTOR = getAsmMethod(void.class, "<init>", Definition.class, String.class, String.class); + final static Method EXECUTE = getAsmMethod(Object.class, "execute", Map.class); + final static String SIGNATURE = "(Ljava/util/Map;)Ljava/lang/Object;"; + + final static Type PAINLESS_ERROR_TYPE = Type.getType(PainlessError.class); + + final static Type DEFINITION_TYPE = Type.getType(Definition.class); + + final static Type MAP_TYPE = Type.getType(Map.class); + final static Method MAP_GET = getAsmMethod(Object.class, "get", Object.class); + + final static Type SCORE_ACCESSOR_TYPE = Type.getType(ScoreAccessor.class); + final static Method SCORE_ACCESSOR_FLOAT = getAsmMethod(float.class, "floatValue"); + + final static Method DEF_METHOD_CALL = getAsmMethod( + Object.class, "methodCall", Object.class, String.class, Definition.class, Object[].class, boolean[].class); + final static Method DEF_ARRAY_STORE = getAsmMethod( + void.class, "arrayStore", Object.class, Object.class, Object.class, Definition.class, boolean.class, boolean.class); + final static Method DEF_ARRAY_LOAD = getAsmMethod( + Object.class, "arrayLoad", Object.class, Object.class, Definition.class, boolean.class); + final static Method DEF_FIELD_STORE = getAsmMethod( + void.class, "fieldStore", Object.class, Object.class, String.class, Definition.class, boolean.class); + final static Method DEF_FIELD_LOAD = getAsmMethod( + Object.class, "fieldLoad", Object.class, String.class, Definition.class); + + final static Method DEF_NOT_CALL = getAsmMethod(Object.class, "not", Object.class); + final static Method DEF_NEG_CALL = getAsmMethod(Object.class, "neg", Object.class); + final static Method DEF_MUL_CALL = getAsmMethod(Object.class, "mul", Object.class, Object.class); + final static Method DEF_DIV_CALL = getAsmMethod(Object.class, "div", Object.class, Object.class); + final static Method DEF_REM_CALL = getAsmMethod(Object.class, "rem", Object.class, Object.class); + final static Method DEF_ADD_CALL = getAsmMethod(Object.class, "add", Object.class, Object.class); + final static Method DEF_SUB_CALL = getAsmMethod(Object.class, "sub", Object.class, Object.class); + final static Method DEF_LSH_CALL = getAsmMethod(Object.class, "lsh", Object.class, Object.class); + final static Method DEF_RSH_CALL = getAsmMethod(Object.class, "rsh", Object.class, Object.class); + final static Method DEF_USH_CALL = getAsmMethod(Object.class, "ush", Object.class, Object.class); + final static Method DEF_AND_CALL = getAsmMethod(Object.class, "and", Object.class, Object.class); + final static Method DEF_XOR_CALL = getAsmMethod(Object.class, "xor", Object.class, Object.class); + final static Method DEF_OR_CALL = getAsmMethod(Object.class, "or" , Object.class, Object.class); + final static Method DEF_EQ_CALL = getAsmMethod(boolean.class, "eq" , Object.class, Object.class); + final static Method DEF_LT_CALL = getAsmMethod(boolean.class, "lt" , Object.class, Object.class); + final static
Method DEF_LTE_CALL = getAsmMethod(boolean.class, "lte", Object.class, Object.class); + final static Method DEF_GT_CALL = getAsmMethod(boolean.class, "gt" , Object.class, Object.class); + final static Method DEF_GTE_CALL = getAsmMethod(boolean.class, "gte", Object.class, Object.class); + + final static Type STRINGBUILDER_TYPE = Type.getType(StringBuilder.class); + + final static Method STRINGBUILDER_CONSTRUCTOR = getAsmMethod(void.class, "<init>"); + final static Method STRINGBUILDER_APPEND_BOOLEAN = getAsmMethod(StringBuilder.class, "append", boolean.class); + final static Method STRINGBUILDER_APPEND_CHAR = getAsmMethod(StringBuilder.class, "append", char.class); + final static Method STRINGBUILDER_APPEND_INT = getAsmMethod(StringBuilder.class, "append", int.class); + final static Method STRINGBUILDER_APPEND_LONG = getAsmMethod(StringBuilder.class, "append", long.class); + final static Method STRINGBUILDER_APPEND_FLOAT = getAsmMethod(StringBuilder.class, "append", float.class); + final static Method STRINGBUILDER_APPEND_DOUBLE = getAsmMethod(StringBuilder.class, "append", double.class); + final static Method STRINGBUILDER_APPEND_STRING = getAsmMethod(StringBuilder.class, "append", String.class); + final static Method STRINGBUILDER_APPEND_OBJECT = getAsmMethod(StringBuilder.class, "append", Object.class); + final static Method STRINGBUILDER_TOSTRING = getAsmMethod(String.class, "toString"); + + final static Method TOINTEXACT_LONG = getAsmMethod(int.class, "toIntExact", long.class); + final static Method NEGATEEXACT_INT = getAsmMethod(int.class, "negateExact", int.class); + final static Method NEGATEEXACT_LONG = getAsmMethod(long.class, "negateExact", long.class); + final static Method MULEXACT_INT = getAsmMethod(int.class, "multiplyExact", int.class, int.class); + final static Method MULEXACT_LONG = getAsmMethod(long.class, "multiplyExact", long.class, long.class); + final static Method ADDEXACT_INT = getAsmMethod(int.class, "addExact", int.class, int.class); + final static Method ADDEXACT_LONG = getAsmMethod(long.class, "addExact", long.class, long.class); + final static Method SUBEXACT_INT = getAsmMethod(int.class, "subtractExact", int.class, int.class); + final static Method SUBEXACT_LONG = getAsmMethod(long.class, "subtractExact", long.class, long.class); + + final static Method CHECKEQUALS = getAsmMethod(boolean.class, "checkEquals", Object.class, Object.class); + final static Method TOBYTEEXACT_INT = getAsmMethod(byte.class, "toByteExact", int.class); + final static Method TOBYTEEXACT_LONG = getAsmMethod(byte.class, "toByteExact", long.class); + final static Method TOBYTEWOOVERFLOW_FLOAT = getAsmMethod(byte.class, "toByteWithoutOverflow", float.class); + final static Method TOBYTEWOOVERFLOW_DOUBLE = getAsmMethod(byte.class, "toByteWithoutOverflow", double.class); + final static Method TOSHORTEXACT_INT = getAsmMethod(short.class, "toShortExact", int.class); + final static Method TOSHORTEXACT_LONG = getAsmMethod(short.class, "toShortExact", long.class); + final static Method TOSHORTWOOVERFLOW_FLOAT = getAsmMethod(short.class, "toShortWithoutOverflow", float.class); + final static Method TOSHORTWOOVERFLOW_DOUBLE = getAsmMethod(short.class, "toShortWihtoutOverflow", double.class); + final static Method TOCHAREXACT_INT = getAsmMethod(char.class, "toCharExact", int.class); + final static Method TOCHAREXACT_LONG = getAsmMethod(char.class, "toCharExact", long.class); + final static Method TOCHARWOOVERFLOW_FLOAT = getAsmMethod(char.class, "toCharWithoutOverflow", float.class); + final static Method
TOCHARWOOVERFLOW_DOUBLE = getAsmMethod(char.class, "toCharWithoutOverflow", double.class); + final static Method TOINTWOOVERFLOW_FLOAT = getAsmMethod(int.class, "toIntWithoutOverflow", float.class); + final static Method TOINTWOOVERFLOW_DOUBLE = getAsmMethod(int.class, "toIntWithoutOverflow", double.class); + final static Method TOLONGWOOVERFLOW_FLOAT = getAsmMethod(long.class, "toLongWithoutOverflow", float.class); + final static Method TOLONGWOOVERFLOW_DOUBLE = getAsmMethod(long.class, "toLongWithoutOverflow", double.class); + final static Method TOFLOATWOOVERFLOW_DOUBLE = getAsmMethod(float.class, "toFloatWithoutOverflow", double.class); + final static Method MULWOOVERLOW_FLOAT = getAsmMethod(float.class, "multiplyWithoutOverflow", float.class, float.class); + final static Method MULWOOVERLOW_DOUBLE = getAsmMethod(double.class, "multiplyWithoutOverflow", double.class, double.class); + final static Method DIVWOOVERLOW_INT = getAsmMethod(int.class, "divideWithoutOverflow", int.class, int.class); + final static Method DIVWOOVERLOW_LONG = getAsmMethod(long.class, "divideWithoutOverflow", long.class, long.class); + final static Method DIVWOOVERLOW_FLOAT = getAsmMethod(float.class, "divideWithoutOverflow", float.class, float.class); + final static Method DIVWOOVERLOW_DOUBLE = getAsmMethod(double.class, "divideWithoutOverflow", double.class, double.class); + final static Method REMWOOVERLOW_FLOAT = getAsmMethod(float.class, "remainderWithoutOverflow", float.class, float.class); + final static Method REMWOOVERLOW_DOUBLE = getAsmMethod(double.class, "remainderWithoutOverflow", double.class, double.class); + final static Method ADDWOOVERLOW_FLOAT = getAsmMethod(float.class, "addWithoutOverflow", float.class, float.class); + final static Method ADDWOOVERLOW_DOUBLE = getAsmMethod(double.class, "addWithoutOverflow", double.class, double.class); + final static Method SUBWOOVERLOW_FLOAT = getAsmMethod(float.class, "subtractWithoutOverflow", float.class, float.class); + final static Method SUBWOOVERLOW_DOUBLE = getAsmMethod(double.class, "subtractWithoutOverflow", double.class, double.class); + + private static Method getAsmMethod(final Class<?> rtype, final String name, final Class<?>... ptypes) { + return new Method(name, MethodType.methodType(rtype, ptypes).toMethodDescriptorString()); + } + + private WriterConstants() {} +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterExpression.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterExpression.java new file mode 100644 index 00000000000..c850031efa0 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterExpression.java @@ -0,0 +1,684 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.painless; + +import org.elasticsearch.painless.Definition.Sort; +import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.Metadata.ExpressionMetadata; +import org.elasticsearch.painless.PainlessParser.AssignmentContext; +import org.elasticsearch.painless.PainlessParser.BinaryContext; +import org.elasticsearch.painless.PainlessParser.BoolContext; +import org.elasticsearch.painless.PainlessParser.CastContext; +import org.elasticsearch.painless.PainlessParser.CharContext; +import org.elasticsearch.painless.PainlessParser.CompContext; +import org.elasticsearch.painless.PainlessParser.ConditionalContext; +import org.elasticsearch.painless.PainlessParser.ExpressionContext; +import org.elasticsearch.painless.PainlessParser.ExternalContext; +import org.elasticsearch.painless.PainlessParser.FalseContext; +import org.elasticsearch.painless.PainlessParser.IncrementContext; +import org.elasticsearch.painless.PainlessParser.NullContext; +import org.elasticsearch.painless.PainlessParser.NumericContext; +import org.elasticsearch.painless.PainlessParser.PostincContext; +import org.elasticsearch.painless.PainlessParser.PreincContext; +import org.elasticsearch.painless.PainlessParser.TrueContext; +import org.elasticsearch.painless.PainlessParser.UnaryContext; +import org.elasticsearch.painless.WriterUtility.Branch; +import org.objectweb.asm.Label; +import org.objectweb.asm.Opcodes; +import org.objectweb.asm.commons.GeneratorAdapter; + +import static org.elasticsearch.painless.PainlessParser.ADD; +import static org.elasticsearch.painless.PainlessParser.BWAND; +import static org.elasticsearch.painless.PainlessParser.BWOR; +import static org.elasticsearch.painless.PainlessParser.BWXOR; +import static org.elasticsearch.painless.PainlessParser.DIV; +import static org.elasticsearch.painless.PainlessParser.LSH; +import static org.elasticsearch.painless.PainlessParser.MUL; +import static org.elasticsearch.painless.PainlessParser.REM; +import static org.elasticsearch.painless.PainlessParser.RSH; +import static org.elasticsearch.painless.PainlessParser.SUB; +import static org.elasticsearch.painless.PainlessParser.USH; +import static org.elasticsearch.painless.WriterConstants.CHECKEQUALS; +import static org.elasticsearch.painless.WriterConstants.DEF_EQ_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_GTE_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_GT_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_LTE_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_LT_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_NEG_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_NOT_CALL; +import static org.elasticsearch.painless.WriterConstants.NEGATEEXACT_INT; +import static org.elasticsearch.painless.WriterConstants.NEGATEEXACT_LONG; + +class WriterExpression { + private final Metadata metadata; + private final Definition definition; + private final CompilerSettings settings; + + private final GeneratorAdapter execute; + + private final Writer writer; + private final WriterUtility utility; + private final WriterCaster caster; + + WriterExpression(final Metadata metadata, final GeneratorAdapter execute, final Writer writer, + final WriterUtility utility, final WriterCaster caster) { + this.metadata = metadata; + definition = metadata.definition; + settings = metadata.settings; + + this.execute = execute; + + this.writer = writer; + this.utility = utility; + 
this.caster = caster; + } + + void processNumeric(final NumericContext ctx) { + final ExpressionMetadata numericemd = metadata.getExpressionMetadata(ctx); + final Object postConst = numericemd.postConst; + + if (postConst == null) { + utility.writeNumeric(ctx, numericemd.preConst); + caster.checkWriteCast(numericemd); + } else { + utility.writeConstant(ctx, postConst); + } + + utility.checkWriteBranch(ctx); + } + + void processChar(final CharContext ctx) { + final ExpressionMetadata charemd = metadata.getExpressionMetadata(ctx); + final Object postConst = charemd.postConst; + + if (postConst == null) { + utility.writeNumeric(ctx, (int)(char)charemd.preConst); + caster.checkWriteCast(charemd); + } else { + utility.writeConstant(ctx, postConst); + } + + utility.checkWriteBranch(ctx); + } + + void processTrue(final TrueContext ctx) { + final ExpressionMetadata trueemd = metadata.getExpressionMetadata(ctx); + final Object postConst = trueemd.postConst; + final Branch branch = utility.getBranch(ctx); + + if (branch == null) { + if (postConst == null) { + utility.writeBoolean(ctx, true); + caster.checkWriteCast(trueemd); + } else { + utility.writeConstant(ctx, postConst); + } + } else if (branch.tru != null) { + execute.goTo(branch.tru); + } + } + + void processFalse(final FalseContext ctx) { + final ExpressionMetadata falseemd = metadata.getExpressionMetadata(ctx); + final Object postConst = falseemd.postConst; + final Branch branch = utility.getBranch(ctx); + + if (branch == null) { + if (postConst == null) { + utility.writeBoolean(ctx, false); + caster.checkWriteCast(falseemd); + } else { + utility.writeConstant(ctx, postConst); + } + } else if (branch.fals != null) { + execute.goTo(branch.fals); + } + } + + void processNull(final NullContext ctx) { + final ExpressionMetadata nullemd = metadata.getExpressionMetadata(ctx); + + execute.visitInsn(Opcodes.ACONST_NULL); + caster.checkWriteCast(nullemd); + utility.checkWriteBranch(ctx); + } + + void processExternal(final ExternalContext ctx) { + final ExpressionMetadata expremd = metadata.getExpressionMetadata(ctx); + writer.visit(ctx.extstart()); + caster.checkWriteCast(expremd); + utility.checkWriteBranch(ctx); + } + + + void processPostinc(final PostincContext ctx) { + final ExpressionMetadata expremd = metadata.getExpressionMetadata(ctx); + writer.visit(ctx.extstart()); + caster.checkWriteCast(expremd); + utility.checkWriteBranch(ctx); + } + + void processPreinc(final PreincContext ctx) { + final ExpressionMetadata expremd = metadata.getExpressionMetadata(ctx); + writer.visit(ctx.extstart()); + caster.checkWriteCast(expremd); + utility.checkWriteBranch(ctx); + } + + void processUnary(final UnaryContext ctx) { + final ExpressionMetadata unaryemd = metadata.getExpressionMetadata(ctx); + final Object postConst = unaryemd.postConst; + final Object preConst = unaryemd.preConst; + final Branch branch = utility.getBranch(ctx); + + if (postConst != null) { + if (ctx.BOOLNOT() != null) { + if (branch == null) { + utility.writeConstant(ctx, postConst); + } else { + if ((boolean)postConst && branch.tru != null) { + execute.goTo(branch.tru); + } else if (!(boolean)postConst && branch.fals != null) { + execute.goTo(branch.fals); + } + } + } else { + utility.writeConstant(ctx, postConst); + utility.checkWriteBranch(ctx); + } + } else if (preConst != null) { + if (branch == null) { + utility.writeConstant(ctx, preConst); + caster.checkWriteCast(unaryemd); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + } 
else { + final ExpressionContext exprctx = ctx.expression(); + + if (ctx.BOOLNOT() != null) { + final Branch local = utility.markBranch(ctx, exprctx); + + if (branch == null) { + local.fals = new Label(); + final Label aend = new Label(); + + writer.visit(exprctx); + + execute.push(false); + execute.goTo(aend); + execute.mark(local.fals); + execute.push(true); + execute.mark(aend); + + caster.checkWriteCast(unaryemd); + } else { + local.tru = branch.fals; + local.fals = branch.tru; + + writer.visit(exprctx); + } + } else { + final org.objectweb.asm.Type type = unaryemd.from.type; + final Sort sort = unaryemd.from.sort; + + writer.visit(exprctx); + + if (ctx.BWNOT() != null) { + if (sort == Sort.DEF) { + execute.invokeStatic(definition.defobjType.type, DEF_NOT_CALL); + } else { + if (sort == Sort.INT) { + utility.writeConstant(ctx, -1); + } else if (sort == Sort.LONG) { + utility.writeConstant(ctx, -1L); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + + execute.math(GeneratorAdapter.XOR, type); + } + } else if (ctx.SUB() != null) { + if (sort == Sort.DEF) { + execute.invokeStatic(definition.defobjType.type, DEF_NEG_CALL); + } else { + if (settings.getNumericOverflow()) { + execute.math(GeneratorAdapter.NEG, type); + } else { + if (sort == Sort.INT) { + execute.invokeStatic(definition.mathType.type, NEGATEEXACT_INT); + } else if (sort == Sort.LONG) { + execute.invokeStatic(definition.mathType.type, NEGATEEXACT_LONG); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + } + } + } else if (ctx.ADD() == null) { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + + caster.checkWriteCast(unaryemd); + utility.checkWriteBranch(ctx); + } + } + } + + void processCast(final CastContext ctx) { + final ExpressionMetadata castemd = metadata.getExpressionMetadata(ctx); + final Object postConst = castemd.postConst; + + if (postConst == null) { + writer.visit(ctx.expression()); + caster.checkWriteCast(castemd); + } else { + utility.writeConstant(ctx, postConst); + } + + utility.checkWriteBranch(ctx); + } + + void processBinary(final BinaryContext ctx) { + final ExpressionMetadata binaryemd = metadata.getExpressionMetadata(ctx); + final Object postConst = binaryemd.postConst; + final Object preConst = binaryemd.preConst; + final Branch branch = utility.getBranch(ctx); + + if (postConst != null) { + utility.writeConstant(ctx, postConst); + } else if (preConst != null) { + if (branch == null) { + utility.writeConstant(ctx, preConst); + caster.checkWriteCast(binaryemd); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + } else if (binaryemd.from.sort == Sort.STRING) { + final boolean marked = utility.containsStrings(ctx); + + if (!marked) { + utility.writeNewStrings(); + } + + final ExpressionContext exprctx0 = ctx.expression(0); + final ExpressionMetadata expremd0 = metadata.getExpressionMetadata(exprctx0); + utility.addStrings(exprctx0); + writer.visit(exprctx0); + + if (utility.containsStrings(exprctx0)) { + utility.writeAppendStrings(expremd0.from.sort); + utility.removeStrings(exprctx0); + } + + final ExpressionContext exprctx1 = ctx.expression(1); + final ExpressionMetadata expremd1 = metadata.getExpressionMetadata(exprctx1); + utility.addStrings(exprctx1); + writer.visit(exprctx1); + + if (utility.containsStrings(exprctx1)) { + utility.writeAppendStrings(expremd1.from.sort); + utility.removeStrings(exprctx1); + } + + if 
(marked) { + utility.removeStrings(ctx); + } else { + utility.writeToStrings(); + } + + caster.checkWriteCast(binaryemd); + } else { + final ExpressionContext exprctx0 = ctx.expression(0); + final ExpressionContext exprctx1 = ctx.expression(1); + + writer.visit(exprctx0); + writer.visit(exprctx1); + + final Type type = binaryemd.from; + + if (ctx.MUL() != null) utility.writeBinaryInstruction(ctx, type, MUL); + else if (ctx.DIV() != null) utility.writeBinaryInstruction(ctx, type, DIV); + else if (ctx.REM() != null) utility.writeBinaryInstruction(ctx, type, REM); + else if (ctx.ADD() != null) utility.writeBinaryInstruction(ctx, type, ADD); + else if (ctx.SUB() != null) utility.writeBinaryInstruction(ctx, type, SUB); + else if (ctx.LSH() != null) utility.writeBinaryInstruction(ctx, type, LSH); + else if (ctx.USH() != null) utility.writeBinaryInstruction(ctx, type, USH); + else if (ctx.RSH() != null) utility.writeBinaryInstruction(ctx, type, RSH); + else if (ctx.BWAND() != null) utility.writeBinaryInstruction(ctx, type, BWAND); + else if (ctx.BWXOR() != null) utility.writeBinaryInstruction(ctx, type, BWXOR); + else if (ctx.BWOR() != null) utility.writeBinaryInstruction(ctx, type, BWOR); + else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + + caster.checkWriteCast(binaryemd); + } + + utility.checkWriteBranch(ctx); + } + + void processComp(final CompContext ctx) { + final ExpressionMetadata compemd = metadata.getExpressionMetadata(ctx); + final Object postConst = compemd.postConst; + final Object preConst = compemd.preConst; + final Branch branch = utility.getBranch(ctx); + + if (postConst != null) { + if (branch == null) { + utility.writeConstant(ctx, postConst); + } else { + if ((boolean)postConst && branch.tru != null) { + execute.mark(branch.tru); + } else if (!(boolean)postConst && branch.fals != null) { + execute.mark(branch.fals); + } + } + } else if (preConst != null) { + if (branch == null) { + utility.writeConstant(ctx, preConst); + caster.checkWriteCast(compemd); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + } else { + final ExpressionContext exprctx0 = ctx.expression(0); + final ExpressionMetadata expremd0 = metadata.getExpressionMetadata(exprctx0); + + final ExpressionContext exprctx1 = ctx.expression(1); + final ExpressionMetadata expremd1 = metadata.getExpressionMetadata(exprctx1); + final org.objectweb.asm.Type type = expremd1.to.type; + final Sort sort1 = expremd1.to.sort; + + writer.visit(exprctx0); + + if (!expremd1.isNull) { + writer.visit(exprctx1); + } + + final boolean tru = branch != null && branch.tru != null; + final boolean fals = branch != null && branch.fals != null; + final Label jump = tru ? branch.tru : fals ? 
branch.fals : new Label(); + final Label end = new Label(); + + final boolean eq = (ctx.EQ() != null || ctx.EQR() != null) && (tru || !fals) || + (ctx.NE() != null || ctx.NER() != null) && fals; + final boolean ne = (ctx.NE() != null || ctx.NER() != null) && (tru || !fals) || + (ctx.EQ() != null || ctx.EQR() != null) && fals; + final boolean lt = ctx.LT() != null && (tru || !fals) || ctx.GTE() != null && fals; + final boolean lte = ctx.LTE() != null && (tru || !fals) || ctx.GT() != null && fals; + final boolean gt = ctx.GT() != null && (tru || !fals) || ctx.LTE() != null && fals; + final boolean gte = ctx.GTE() != null && (tru || !fals) || ctx.LT() != null && fals; + + boolean writejump = true; + + switch (sort1) { + case VOID: + case BYTE: + case SHORT: + case CHAR: + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + case BOOL: + if (eq) execute.ifZCmp(GeneratorAdapter.EQ, jump); + else if (ne) execute.ifZCmp(GeneratorAdapter.NE, jump); + else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + + break; + case INT: + case LONG: + case FLOAT: + case DOUBLE: + if (eq) execute.ifCmp(type, GeneratorAdapter.EQ, jump); + else if (ne) execute.ifCmp(type, GeneratorAdapter.NE, jump); + else if (lt) execute.ifCmp(type, GeneratorAdapter.LT, jump); + else if (lte) execute.ifCmp(type, GeneratorAdapter.LE, jump); + else if (gt) execute.ifCmp(type, GeneratorAdapter.GT, jump); + else if (gte) execute.ifCmp(type, GeneratorAdapter.GE, jump); + else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + + break; + case DEF: + if (eq) { + if (expremd1.isNull) { + execute.ifNull(jump); + } else if (!expremd0.isNull && ctx.EQ() != null) { + execute.invokeStatic(definition.defobjType.type, DEF_EQ_CALL); + } else { + execute.ifCmp(type, GeneratorAdapter.EQ, jump); + } + } else if (ne) { + if (expremd1.isNull) { + execute.ifNonNull(jump); + } else if (!expremd0.isNull && ctx.NE() != null) { + execute.invokeStatic(definition.defobjType.type, DEF_EQ_CALL); + execute.ifZCmp(GeneratorAdapter.EQ, jump); + } else { + execute.ifCmp(type, GeneratorAdapter.NE, jump); + } + } else if (lt) { + execute.invokeStatic(definition.defobjType.type, DEF_LT_CALL); + } else if (lte) { + execute.invokeStatic(definition.defobjType.type, DEF_LTE_CALL); + } else if (gt) { + execute.invokeStatic(definition.defobjType.type, DEF_GT_CALL); + } else if (gte) { + execute.invokeStatic(definition.defobjType.type, DEF_GTE_CALL); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + + writejump = expremd1.isNull || ne || ctx.EQR() != null; + + if (branch != null && !writejump) { + execute.ifZCmp(GeneratorAdapter.NE, jump); + } + + break; + default: + if (eq) { + if (expremd1.isNull) { + execute.ifNull(jump); + } else if (ctx.EQ() != null) { + execute.invokeStatic(definition.utilityType.type, CHECKEQUALS); + + if (branch != null) { + execute.ifZCmp(GeneratorAdapter.NE, jump); + } + + writejump = false; + } else { + execute.ifCmp(type, GeneratorAdapter.EQ, jump); + } + } else if (ne) { + if (expremd1.isNull) { + execute.ifNonNull(jump); + } else if (ctx.NE() != null) { + execute.invokeStatic(definition.utilityType.type, CHECKEQUALS); + execute.ifZCmp(GeneratorAdapter.EQ, jump); + } else { + execute.ifCmp(type, GeneratorAdapter.NE, jump); + } + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + } + + if (branch == null) { + if (writejump) { + 
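+ // No branch target exists here, so the comparison result is materialized as a boolean on the stack: the fall-through path pushes false, while the jump label pushes true, and both paths meet at the end label below.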
execute.push(false); + execute.goTo(end); + execute.mark(jump); + execute.push(true); + execute.mark(end); + } + + caster.checkWriteCast(compemd); + } + } + } + + void processBool(final BoolContext ctx) { + final ExpressionMetadata boolemd = metadata.getExpressionMetadata(ctx); + final Object postConst = boolemd.postConst; + final Object preConst = boolemd.preConst; + final Branch branch = utility.getBranch(ctx); + + if (postConst != null) { + if (branch == null) { + utility.writeConstant(ctx, postConst); + } else { + if ((boolean)postConst && branch.tru != null) { + execute.mark(branch.tru); + } else if (!(boolean)postConst && branch.fals != null) { + execute.mark(branch.fals); + } + } + } else if (preConst != null) { + if (branch == null) { + utility.writeConstant(ctx, preConst); + caster.checkWriteCast(boolemd); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + } else { + final ExpressionContext exprctx0 = ctx.expression(0); + final ExpressionContext exprctx1 = ctx.expression(1); + + if (branch == null) { + if (ctx.BOOLAND() != null) { + final Branch local = utility.markBranch(ctx, exprctx0, exprctx1); + local.fals = new Label(); + final Label end = new Label(); + + writer.visit(exprctx0); + writer.visit(exprctx1); + + execute.push(true); + execute.goTo(end); + execute.mark(local.fals); + execute.push(false); + execute.mark(end); + } else if (ctx.BOOLOR() != null) { + final Branch branch0 = utility.markBranch(ctx, exprctx0); + branch0.tru = new Label(); + final Branch branch1 = utility.markBranch(ctx, exprctx1); + branch1.fals = new Label(); + final Label aend = new Label(); + + writer.visit(exprctx0); + writer.visit(exprctx1); + + execute.mark(branch0.tru); + execute.push(true); + execute.goTo(aend); + execute.mark(branch1.fals); + execute.push(false); + execute.mark(aend); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + + caster.checkWriteCast(boolemd); + } else { + if (ctx.BOOLAND() != null) { + final Branch branch0 = utility.markBranch(ctx, exprctx0); + branch0.fals = branch.fals == null ? new Label() : branch.fals; + final Branch branch1 = utility.markBranch(ctx, exprctx1); + branch1.tru = branch.tru; + branch1.fals = branch.fals; + + writer.visit(exprctx0); + writer.visit(exprctx1); + + if (branch.fals == null) { + execute.mark(branch0.fals); + } + } else if (ctx.BOOLOR() != null) { + final Branch branch0 = utility.markBranch(ctx, exprctx0); + branch0.tru = branch.tru == null ? 
new Label() : branch.tru; + final Branch branch1 = utility.markBranch(ctx, exprctx1); + branch1.tru = branch.tru; + branch1.fals = branch.fals; + + writer.visit(exprctx0); + writer.visit(exprctx1); + + if (branch.tru == null) { + execute.mark(branch0.tru); + } + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + } + } + } + + void processConditional(final ConditionalContext ctx) { + final ExpressionMetadata condemd = metadata.getExpressionMetadata(ctx); + final Branch branch = utility.getBranch(ctx); + + final ExpressionContext expr0 = ctx.expression(0); + final ExpressionContext expr1 = ctx.expression(1); + final ExpressionContext expr2 = ctx.expression(2); + + final Branch local = utility.markBranch(ctx, expr0); + local.fals = new Label(); + local.end = new Label(); + + if (branch != null) { + utility.copyBranch(branch, expr1, expr2); + } + + writer.visit(expr0); + writer.visit(expr1); + execute.goTo(local.end); + execute.mark(local.fals); + writer.visit(expr2); + execute.mark(local.end); + + if (branch == null) { + caster.checkWriteCast(condemd); + } + } + + void processAssignment(final AssignmentContext ctx) { + final ExpressionMetadata expremd = metadata.getExpressionMetadata(ctx); + writer.visit(ctx.extstart()); + caster.checkWriteCast(expremd); + utility.checkWriteBranch(ctx); + } + + void processIncrement(final IncrementContext ctx) { + final ExpressionMetadata incremd = metadata.getExpressionMetadata(ctx); + final Object postConst = incremd.postConst; + + if (postConst == null) { + utility.writeNumeric(ctx, incremd.preConst); + caster.checkWriteCast(incremd); + } else { + utility.writeConstant(ctx, postConst); + } + + utility.checkWriteBranch(ctx); + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterExternal.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterExternal.java new file mode 100644 index 00000000000..8ab729f98fa --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterExternal.java @@ -0,0 +1,769 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.elasticsearch.painless.Definition.Constructor; +import org.elasticsearch.painless.Definition.Field; +import org.elasticsearch.painless.Definition.Method; +import org.elasticsearch.painless.Definition.Sort; +import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.Metadata.ExpressionMetadata; +import org.elasticsearch.painless.Metadata.ExtNodeMetadata; +import org.elasticsearch.painless.Metadata.ExternalMetadata; +import org.elasticsearch.painless.PainlessParser.ExpressionContext; +import org.elasticsearch.painless.PainlessParser.ExtbraceContext; +import org.elasticsearch.painless.PainlessParser.ExtcallContext; +import org.elasticsearch.painless.PainlessParser.ExtcastContext; +import org.elasticsearch.painless.PainlessParser.ExtdotContext; +import org.elasticsearch.painless.PainlessParser.ExtfieldContext; +import org.elasticsearch.painless.PainlessParser.ExtnewContext; +import org.elasticsearch.painless.PainlessParser.ExtprecContext; +import org.elasticsearch.painless.PainlessParser.ExtstartContext; +import org.elasticsearch.painless.PainlessParser.ExtstringContext; +import org.elasticsearch.painless.PainlessParser.ExttypeContext; +import org.elasticsearch.painless.PainlessParser.ExtvarContext; +import org.objectweb.asm.Opcodes; +import org.objectweb.asm.commons.GeneratorAdapter; + +import java.util.List; + +import static org.elasticsearch.painless.PainlessParser.ADD; +import static org.elasticsearch.painless.PainlessParser.DIV; +import static org.elasticsearch.painless.PainlessParser.MUL; +import static org.elasticsearch.painless.PainlessParser.REM; +import static org.elasticsearch.painless.PainlessParser.SUB; +import static org.elasticsearch.painless.WriterConstants.CLASS_TYPE; +import static org.elasticsearch.painless.WriterConstants.DEFINITION_TYPE; +import static org.elasticsearch.painless.WriterConstants.DEF_ARRAY_LOAD; +import static org.elasticsearch.painless.WriterConstants.DEF_ARRAY_STORE; +import static org.elasticsearch.painless.WriterConstants.DEF_FIELD_LOAD; +import static org.elasticsearch.painless.WriterConstants.DEF_FIELD_STORE; +import static org.elasticsearch.painless.WriterConstants.DEF_METHOD_CALL; +import static org.elasticsearch.painless.WriterConstants.TOBYTEEXACT_INT; +import static org.elasticsearch.painless.WriterConstants.TOBYTEEXACT_LONG; +import static org.elasticsearch.painless.WriterConstants.TOBYTEWOOVERFLOW_DOUBLE; +import static org.elasticsearch.painless.WriterConstants.TOBYTEWOOVERFLOW_FLOAT; +import static org.elasticsearch.painless.WriterConstants.TOCHAREXACT_INT; +import static org.elasticsearch.painless.WriterConstants.TOCHAREXACT_LONG; +import static org.elasticsearch.painless.WriterConstants.TOCHARWOOVERFLOW_DOUBLE; +import static org.elasticsearch.painless.WriterConstants.TOCHARWOOVERFLOW_FLOAT; +import static org.elasticsearch.painless.WriterConstants.TOFLOATWOOVERFLOW_DOUBLE; +import static org.elasticsearch.painless.WriterConstants.TOINTEXACT_LONG; +import static org.elasticsearch.painless.WriterConstants.TOINTWOOVERFLOW_DOUBLE; +import static org.elasticsearch.painless.WriterConstants.TOINTWOOVERFLOW_FLOAT; +import static org.elasticsearch.painless.WriterConstants.TOLONGWOOVERFLOW_DOUBLE; +import static org.elasticsearch.painless.WriterConstants.TOLONGWOOVERFLOW_FLOAT; +import static org.elasticsearch.painless.WriterConstants.TOSHORTEXACT_INT; +import static 
org.elasticsearch.painless.WriterConstants.TOSHORTEXACT_LONG; +import static org.elasticsearch.painless.WriterConstants.TOSHORTWOOVERFLOW_DOUBLE; +import static org.elasticsearch.painless.WriterConstants.TOSHORTWOOVERFLOW_FLOAT; + +class WriterExternal { + private final Metadata metadata; + private final Definition definition; + private final CompilerSettings settings; + + private final GeneratorAdapter execute; + + private final Writer writer; + private final WriterUtility utility; + private final WriterCaster caster; + + WriterExternal(final Metadata metadata, final GeneratorAdapter execute, final Writer writer, + final WriterUtility utility, final WriterCaster caster) { + this.metadata = metadata; + definition = metadata.definition; + settings = metadata.settings; + + this.execute = execute; + + this.writer = writer; + this.utility = utility; + this.caster = caster; + } + + void processExtstart(final ExtstartContext ctx) { + final ExternalMetadata startemd = metadata.getExternalMetadata(ctx); + + if (startemd.token == ADD) { + final ExpressionMetadata storeemd = metadata.getExpressionMetadata(startemd.storeExpr); + + if (startemd.current.sort == Sort.STRING || storeemd.from.sort == Sort.STRING) { + utility.writeNewStrings(); + utility.addStrings(startemd.storeExpr); + } + } + + final ExtprecContext precctx = ctx.extprec(); + final ExtcastContext castctx = ctx.extcast(); + final ExttypeContext typectx = ctx.exttype(); + final ExtvarContext varctx = ctx.extvar(); + final ExtnewContext newctx = ctx.extnew(); + final ExtstringContext stringctx = ctx.extstring(); + + if (precctx != null) { + writer.visit(precctx); + } else if (castctx != null) { + writer.visit(castctx); + } else if (typectx != null) { + writer.visit(typectx); + } else if (varctx != null) { + writer.visit(varctx); + } else if (newctx != null) { + writer.visit(newctx); + } else if (stringctx != null) { + writer.visit(stringctx); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + } + + void processExtprec(final ExtprecContext ctx) { + final ExtprecContext precctx = ctx.extprec(); + final ExtcastContext castctx = ctx.extcast(); + final ExttypeContext typectx = ctx.exttype(); + final ExtvarContext varctx = ctx.extvar(); + final ExtnewContext newctx = ctx.extnew(); + final ExtstringContext stringctx = ctx.extstring(); + + if (precctx != null) { + writer.visit(precctx); + } else if (castctx != null) { + writer.visit(castctx); + } else if (typectx != null) { + writer.visit(typectx); + } else if (varctx != null) { + writer.visit(varctx); + } else if (newctx != null) { + writer.visit(newctx); + } else if (stringctx != null) { + writer.visit(stringctx); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + if (dotctx != null) { + writer.visit(dotctx); + } else if (bracectx != null) { + writer.visit(bracectx); + } + } + + void processExtcast(final ExtcastContext ctx) { + ExtNodeMetadata castenmd = metadata.getExtNodeMetadata(ctx); + + final ExtprecContext precctx = ctx.extprec(); + final ExtcastContext castctx = ctx.extcast(); + final ExttypeContext typectx = ctx.exttype(); + final ExtvarContext varctx = ctx.extvar(); + final ExtnewContext newctx = ctx.extnew(); + final ExtstringContext stringctx = ctx.extstring(); + + if (precctx != null) { + writer.visit(precctx); + } else if (castctx != null) { + writer.visit(castctx); + } else if (typectx != 
null) { + writer.visit(typectx); + } else if (varctx != null) { + writer.visit(varctx); + } else if (newctx != null) { + writer.visit(newctx); + } else if (stringctx != null) { + writer.visit(stringctx); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + + caster.checkWriteCast(ctx, castenmd.castTo); + } + + void processExtbrace(final ExtbraceContext ctx) { + final ExpressionContext exprctx = ctx.expression(); + + writer.visit(exprctx); + writeLoadStoreExternal(ctx); + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + if (dotctx != null) { + writer.visit(dotctx); + } else if (bracectx != null) { + writer.visit(bracectx); + } + } + + void processExtdot(final ExtdotContext ctx) { + final ExtcallContext callctx = ctx.extcall(); + final ExtfieldContext fieldctx = ctx.extfield(); + + if (callctx != null) { + writer.visit(callctx); + } else if (fieldctx != null) { + writer.visit(fieldctx); + } + } + + void processExttype(final ExttypeContext ctx) { + writer.visit(ctx.extdot()); + } + + void processExtcall(final ExtcallContext ctx) { + writeCallExternal(ctx); + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + if (dotctx != null) { + writer.visit(dotctx); + } else if (bracectx != null) { + writer.visit(bracectx); + } + } + + void processExtvar(final ExtvarContext ctx) { + writeLoadStoreExternal(ctx); + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + if (dotctx != null) { + writer.visit(dotctx); + } else if (bracectx != null) { + writer.visit(bracectx); + } + } + + void processExtfield(final ExtfieldContext ctx) { + writeLoadStoreExternal(ctx); + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + if (dotctx != null) { + writer.visit(dotctx); + } else if (bracectx != null) { + writer.visit(bracectx); + } + } + + void processExtnew(final ExtnewContext ctx) { + writeNewExternal(ctx); + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + if (dotctx != null) { + writer.visit(dotctx); + } else if (bracectx != null) { + writer.visit(bracectx); + } + } + + void processExtstring(final ExtstringContext ctx) { + final ExtNodeMetadata stringenmd = metadata.getExtNodeMetadata(ctx); + + utility.writeConstant(ctx, stringenmd.target); + + final ExtdotContext dotctx = ctx.extdot(); + final ExtbraceContext bracectx = ctx.extbrace(); + + if (dotctx != null) { + writer.visit(dotctx); + } else if (bracectx != null) { + writer.visit(bracectx); + } + } + + private void writeLoadStoreExternal(final ParserRuleContext source) { + final ExtNodeMetadata sourceenmd = metadata.getExtNodeMetadata(source); + final ExternalMetadata parentemd = metadata.getExternalMetadata(sourceenmd.parent); + + final boolean length = "#length".equals(sourceenmd.target); + final boolean array = "#brace".equals(sourceenmd.target); + final boolean name = sourceenmd.target instanceof String && !length && !array; + final boolean variable = sourceenmd.target instanceof Integer; + final boolean field = sourceenmd.target instanceof Field; + final boolean shortcut = sourceenmd.target instanceof Object[]; + + if (!length && !variable && !field && !array && !name && !shortcut) { + throw new IllegalStateException(WriterUtility.error(source) + "Target not found for load/store."); + } + + final boolean maplist = shortcut && (boolean)((Object[])sourceenmd.target)[2]; 
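+ // A shortcut target appears to be packed as an Object[] of {getter method, setter method, map/list flag, constant}: the flag was unpacked above, the constant is unpacked next, and the two methods are consumed later by writeLoadStoreShortcut.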
+ final Object constant = shortcut ? ((Object[])sourceenmd.target)[3] : null; + + final boolean x1 = field || name || (shortcut && !maplist); + final boolean x2 = array || (shortcut && maplist); + + if (length) { + execute.arrayLength(); + } else if (sourceenmd.last && parentemd.storeExpr != null) { + final ExpressionMetadata expremd = metadata.getExpressionMetadata(parentemd.storeExpr); + final boolean cat = utility.containsStrings(parentemd.storeExpr); + + if (cat) { + if (field || name || shortcut) { + execute.dupX1(); + } else if (array) { + execute.dup2X1(); + } + + if (maplist) { + if (constant != null) { + utility.writeConstant(source, constant); + } + + execute.dupX2(); + } + + writeLoadStoreInstruction(source, false, variable, field, name, array, shortcut); + utility.writeAppendStrings(sourceenmd.type.sort); + writer.visit(parentemd.storeExpr); + + if (utility.containsStrings(parentemd.storeExpr)) { + utility.writeAppendStrings(expremd.to.sort); + utility.removeStrings(parentemd.storeExpr); + } + + utility.writeToStrings(); + caster.checkWriteCast(source, sourceenmd.castTo); + + if (parentemd.read) { + utility.writeDup(sourceenmd.type.sort.size, x1, x2); + } + + writeLoadStoreInstruction(source, true, variable, field, name, array, shortcut); + } else if (parentemd.token > 0) { + final int token = parentemd.token; + + if (field || name || shortcut) { + execute.dup(); + } else if (array) { + execute.dup2(); + } + + if (maplist) { + if (constant != null) { + utility.writeConstant(source, constant); + } + + execute.dupX1(); + } + + writeLoadStoreInstruction(source, false, variable, field, name, array, shortcut); + + if (parentemd.read && parentemd.post) { + utility.writeDup(sourceenmd.type.sort.size, x1, x2); + } + + caster.checkWriteCast(source, sourceenmd.castFrom); + writer.visit(parentemd.storeExpr); + + utility.writeBinaryInstruction(source, sourceenmd.promote, token); + + boolean exact = false; + + if (!settings.getNumericOverflow() && expremd.typesafe && sourceenmd.type.sort != Sort.DEF && + (token == MUL || token == DIV || token == REM || token == ADD || token == SUB)) { + exact = writeExactInstruction(sourceenmd.type.sort, sourceenmd.promote.sort); + } + + if (!exact) { + caster.checkWriteCast(source, sourceenmd.castTo); + } + + if (parentemd.read && !parentemd.post) { + utility.writeDup(sourceenmd.type.sort.size, x1, x2); + } + + writeLoadStoreInstruction(source, true, variable, field, name, array, shortcut); + } else { + if (constant != null) { + utility.writeConstant(source, constant); + } + + writer.visit(parentemd.storeExpr); + + if (parentemd.read) { + utility.writeDup(sourceenmd.type.sort.size, x1, x2); + } + + writeLoadStoreInstruction(source, true, variable, field, name, array, shortcut); + } + } else { + if (constant != null) { + utility.writeConstant(source, constant); + } + + writeLoadStoreInstruction(source, false, variable, field, name, array, shortcut); + } + } + + private void writeLoadStoreInstruction(final ParserRuleContext source, + final boolean store, final boolean variable, + final boolean field, final boolean name, + final boolean array, final boolean shortcut) { + final ExtNodeMetadata sourceemd = metadata.getExtNodeMetadata(source); + + if (variable) { + writeLoadStoreVariable(source, store, sourceemd.type, (int)sourceemd.target); + } else if (field) { + writeLoadStoreField(store, (Field)sourceemd.target); + } else if (name) { + writeLoadStoreField(source, store, (String)sourceemd.target); + } else if (array) { + writeLoadStoreArray(source, store, 
sourceemd.type); + } else if (shortcut) { + Object[] targets = (Object[])sourceemd.target; + writeLoadStoreShortcut(store, (Method)targets[0], (Method)targets[1]); + } else { + throw new IllegalStateException(WriterUtility.error(source) + "Load/Store requires a variable, field, or array."); + } + } + + private void writeLoadStoreVariable(final ParserRuleContext source, final boolean store, + final Type type, final int slot) { + if (type.sort == Sort.VOID) { + throw new IllegalStateException(WriterUtility.error(source) + "Cannot load/store void type."); + } + + if (store) { + execute.visitVarInsn(type.type.getOpcode(Opcodes.ISTORE), slot); + } else { + execute.visitVarInsn(type.type.getOpcode(Opcodes.ILOAD), slot); + } + } + + private void writeLoadStoreField(final boolean store, final Field field) { + if (java.lang.reflect.Modifier.isStatic(field.reflect.getModifiers())) { + if (store) { + execute.putStatic(field.owner.type, field.reflect.getName(), field.type.type); + } else { + execute.getStatic(field.owner.type, field.reflect.getName(), field.type.type); + + if (!field.generic.clazz.equals(field.type.clazz)) { + execute.checkCast(field.generic.type); + } + } + } else { + if (store) { + execute.putField(field.owner.type, field.reflect.getName(), field.type.type); + } else { + execute.getField(field.owner.type, field.reflect.getName(), field.type.type); + + if (!field.generic.clazz.equals(field.type.clazz)) { + execute.checkCast(field.generic.type); + } + } + } + } + + private void writeLoadStoreField(final ParserRuleContext source, final boolean store, final String name) { + if (store) { + final ExtNodeMetadata sourceemd = metadata.getExtNodeMetadata(source); + final ExternalMetadata parentemd = metadata.getExternalMetadata(sourceemd.parent); + final ExpressionMetadata expremd = metadata.getExpressionMetadata(parentemd.storeExpr); + + execute.push(name); + execute.loadThis(); + execute.getField(CLASS_TYPE, "definition", DEFINITION_TYPE); + execute.push(parentemd.token == 0 && expremd.typesafe); + execute.invokeStatic(definition.defobjType.type, DEF_FIELD_STORE); + } else { + execute.push(name); + execute.loadThis(); + execute.getField(CLASS_TYPE, "definition", DEFINITION_TYPE); + execute.invokeStatic(definition.defobjType.type, DEF_FIELD_LOAD); + } + } + + private void writeLoadStoreArray(final ParserRuleContext source, final boolean store, final Type type) { + if (type.sort == Sort.VOID) { + throw new IllegalStateException(WriterUtility.error(source) + "Cannot load/store void type."); + } + + if (type.sort == Sort.DEF) { + final ExtbraceContext bracectx = (ExtbraceContext)source; + final ExpressionMetadata expremd0 = metadata.getExpressionMetadata(bracectx.expression()); + + if (store) { + final ExtNodeMetadata braceenmd = metadata.getExtNodeMetadata(bracectx); + final ExternalMetadata parentemd = metadata.getExternalMetadata(braceenmd.parent); + final ExpressionMetadata expremd1 = metadata.getExpressionMetadata(parentemd.storeExpr); + + execute.loadThis(); + execute.getField(CLASS_TYPE, "definition", DEFINITION_TYPE); + execute.push(expremd0.typesafe); + execute.push(parentemd.token == 0 && expremd1.typesafe); + execute.invokeStatic(definition.defobjType.type, DEF_ARRAY_STORE); + } else { + execute.loadThis(); + execute.getField(CLASS_TYPE, "definition", DEFINITION_TYPE); + execute.push(expremd0.typesafe); + execute.invokeStatic(definition.defobjType.type, DEF_ARRAY_LOAD); + } + } else { + if (store) { + execute.arrayStore(type.type); + } else { + execute.arrayLoad(type.type); + } + } 
+ } + + private void writeLoadStoreShortcut(final boolean store, final Method getter, final Method setter) { + final Method method = store ? setter : getter; + + if (java.lang.reflect.Modifier.isInterface(getter.owner.clazz.getModifiers())) { + execute.invokeInterface(method.owner.type, method.method); + } else { + execute.invokeVirtual(method.owner.type, method.method); + } + + if (store) { + utility.writePop(method.rtn.type.getSize()); + } else if (!method.rtn.clazz.equals(method.handle.type().returnType())) { + execute.checkCast(method.rtn.type); + } + } + + /** + * Called for any compound assignment (including increment/decrement instructions). + * We have to be stricter than writeBinary, and do overflow checks against the original type's size + * instead of the promoted type's size, since the result will be implicitly cast back. + * + * @return This will be true if an instruction is written, false otherwise. + */ + private boolean writeExactInstruction(final Sort osort, final Sort psort) { + if (psort == Sort.DOUBLE) { + if (osort == Sort.FLOAT) { + execute.invokeStatic(definition.utilityType.type, TOFLOATWOOVERFLOW_DOUBLE); + } else if (osort == Sort.FLOAT_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOFLOATWOOVERFLOW_DOUBLE); + execute.checkCast(definition.floatobjType.type); + } else if (osort == Sort.LONG) { + execute.invokeStatic(definition.utilityType.type, TOLONGWOOVERFLOW_DOUBLE); + } else if (osort == Sort.LONG_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOLONGWOOVERFLOW_DOUBLE); + execute.checkCast(definition.longobjType.type); + } else if (osort == Sort.INT) { + execute.invokeStatic(definition.utilityType.type, TOINTWOOVERFLOW_DOUBLE); + } else if (osort == Sort.INT_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOINTWOOVERFLOW_DOUBLE); + execute.checkCast(definition.intobjType.type); + } else if (osort == Sort.CHAR) { + execute.invokeStatic(definition.utilityType.type, TOCHARWOOVERFLOW_DOUBLE); + } else if (osort == Sort.CHAR_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOCHARWOOVERFLOW_DOUBLE); + execute.checkCast(definition.charobjType.type); + } else if (osort == Sort.SHORT) { + execute.invokeStatic(definition.utilityType.type, TOSHORTWOOVERFLOW_DOUBLE); + } else if (osort == Sort.SHORT_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOSHORTWOOVERFLOW_DOUBLE); + execute.checkCast(definition.shortobjType.type); + } else if (osort == Sort.BYTE) { + execute.invokeStatic(definition.utilityType.type, TOBYTEWOOVERFLOW_DOUBLE); + } else if (osort == Sort.BYTE_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOBYTEWOOVERFLOW_DOUBLE); + execute.checkCast(definition.byteobjType.type); + } else { + return false; + } + } else if (psort == Sort.FLOAT) { + if (osort == Sort.LONG) { + execute.invokeStatic(definition.utilityType.type, TOLONGWOOVERFLOW_FLOAT); + } else if (osort == Sort.LONG_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOLONGWOOVERFLOW_FLOAT); + execute.checkCast(definition.longobjType.type); + } else if (osort == Sort.INT) { + execute.invokeStatic(definition.utilityType.type, TOINTWOOVERFLOW_FLOAT); + } else if (osort == Sort.INT_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOINTWOOVERFLOW_FLOAT); + execute.checkCast(definition.intobjType.type); + } else if (osort == Sort.CHAR) { + execute.invokeStatic(definition.utilityType.type, TOCHARWOOVERFLOW_FLOAT); + } else if (osort == Sort.CHAR_OBJ) { + execute.invokeStatic(definition.utilityType.type, 
TOCHARWOOVERFLOW_FLOAT); + execute.checkCast(definition.charobjType.type); + } else if (osort == Sort.SHORT) { + execute.invokeStatic(definition.utilityType.type, TOSHORTWOOVERFLOW_FLOAT); + } else if (osort == Sort.SHORT_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOSHORTWOOVERFLOW_FLOAT); + execute.checkCast(definition.shortobjType.type); + } else if (osort == Sort.BYTE) { + execute.invokeStatic(definition.utilityType.type, TOBYTEWOOVERFLOW_FLOAT); + } else if (osort == Sort.BYTE_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOBYTEWOOVERFLOW_FLOAT); + execute.checkCast(definition.byteobjType.type); + } else { + return false; + } + } else if (psort == Sort.LONG) { + if (osort == Sort.INT) { + execute.invokeStatic(definition.mathType.type, TOINTEXACT_LONG); + } else if (osort == Sort.INT_OBJ) { + execute.invokeStatic(definition.mathType.type, TOINTEXACT_LONG); + execute.checkCast(definition.intobjType.type); + } else if (osort == Sort.CHAR) { + execute.invokeStatic(definition.utilityType.type, TOCHAREXACT_LONG); + } else if (osort == Sort.CHAR_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOCHAREXACT_LONG); + execute.checkCast(definition.charobjType.type); + } else if (osort == Sort.SHORT) { + execute.invokeStatic(definition.utilityType.type, TOSHORTEXACT_LONG); + } else if (osort == Sort.SHORT_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOSHORTEXACT_LONG); + execute.checkCast(definition.shortobjType.type); + } else if (osort == Sort.BYTE) { + execute.invokeStatic(definition.utilityType.type, TOBYTEEXACT_LONG); + } else if (osort == Sort.BYTE_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOBYTEEXACT_LONG); + execute.checkCast(definition.byteobjType.type); + } else { + return false; + } + } else if (psort == Sort.INT) { + if (osort == Sort.CHAR) { + execute.invokeStatic(definition.utilityType.type, TOCHAREXACT_INT); + } else if (osort == Sort.CHAR_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOCHAREXACT_INT); + execute.checkCast(definition.charobjType.type); + } else if (osort == Sort.SHORT) { + execute.invokeStatic(definition.utilityType.type, TOSHORTEXACT_INT); + } else if (osort == Sort.SHORT_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOSHORTEXACT_INT); + execute.checkCast(definition.shortobjType.type); + } else if (osort == Sort.BYTE) { + execute.invokeStatic(definition.utilityType.type, TOBYTEEXACT_INT); + } else if (osort == Sort.BYTE_OBJ) { + execute.invokeStatic(definition.utilityType.type, TOBYTEEXACT_INT); + execute.checkCast(definition.byteobjType.type); + } else { + return false; + } + } else { + return false; + } + + return true; + } + + private void writeNewExternal(final ExtnewContext source) { + final ExtNodeMetadata sourceenmd = metadata.getExtNodeMetadata(source); + final ExternalMetadata parentemd = metadata.getExternalMetadata(sourceenmd.parent); + + final boolean makearray = "#makearray".equals(sourceenmd.target); + final boolean constructor = sourceenmd.target instanceof Constructor; + + if (!makearray && !constructor) { + throw new IllegalStateException(WriterUtility.error(source) + "Target not found for new call."); + } + + if (makearray) { + for (final ExpressionContext exprctx : source.expression()) { + writer.visit(exprctx); + } + + if (sourceenmd.type.sort == Sort.ARRAY) { + execute.visitMultiANewArrayInsn(sourceenmd.type.type.getDescriptor(), sourceenmd.type.type.getDimensions()); + } else { + execute.newArray(sourceenmd.type.type); + } + } else { + 
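+ // Constructor case: allocate the instance, duplicate the reference when the surrounding expression reads the result, write the argument expressions, then invoke the resolved constructor.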
execute.newInstance(sourceenmd.type.type); + + if (parentemd.read) { + execute.dup(); + } + + for (final ExpressionContext exprctx : source.arguments().expression()) { + writer.visit(exprctx); + } + + final Constructor target = (Constructor)sourceenmd.target; + execute.invokeConstructor(target.owner.type, target.method); + } + } + + private void writeCallExternal(final ExtcallContext source) { + final ExtNodeMetadata sourceenmd = metadata.getExtNodeMetadata(source); + + final boolean method = sourceenmd.target instanceof Method; + final boolean def = sourceenmd.target instanceof String; + + if (!method && !def) { + throw new IllegalStateException(WriterUtility.error(source) + "Target not found for call."); + } + + final List<ExpressionContext> arguments = source.arguments().expression(); + + if (method) { + for (final ExpressionContext exprctx : arguments) { + writer.visit(exprctx); + } + + final Method target = (Method)sourceenmd.target; + + if (java.lang.reflect.Modifier.isStatic(target.reflect.getModifiers())) { + execute.invokeStatic(target.owner.type, target.method); + } else if (java.lang.reflect.Modifier.isInterface(target.owner.clazz.getModifiers())) { + execute.invokeInterface(target.owner.type, target.method); + } else { + execute.invokeVirtual(target.owner.type, target.method); + } + + if (!target.rtn.clazz.equals(target.handle.type().returnType())) { + execute.checkCast(target.rtn.type); + } + } else { + execute.push((String)sourceenmd.target); + execute.loadThis(); + execute.getField(CLASS_TYPE, "definition", DEFINITION_TYPE); + + execute.push(arguments.size()); + execute.newArray(definition.defType.type); + + for (int argument = 0; argument < arguments.size(); ++argument) { + execute.dup(); + execute.push(argument); + writer.visit(arguments.get(argument)); + execute.arrayStore(definition.defType.type); + } + + execute.push(arguments.size()); + execute.newArray(definition.booleanType.type); + + for (int argument = 0; argument < arguments.size(); ++argument) { + execute.dup(); + execute.push(argument); + execute.push(metadata.getExpressionMetadata(arguments.get(argument)).typesafe); + execute.arrayStore(definition.booleanType.type); + } + + execute.invokeStatic(definition.defobjType.type, DEF_METHOD_CALL); + } + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterStatement.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterStatement.java new file mode 100644 index 00000000000..a0e70f319b5 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterStatement.java @@ -0,0 +1,391 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.painless; + +import org.elasticsearch.painless.Definition.Sort; +import org.elasticsearch.painless.Metadata.ExpressionMetadata; +import org.elasticsearch.painless.Metadata.StatementMetadata; +import org.elasticsearch.painless.PainlessParser.AfterthoughtContext; +import org.elasticsearch.painless.PainlessParser.BlockContext; +import org.elasticsearch.painless.PainlessParser.DeclContext; +import org.elasticsearch.painless.PainlessParser.DeclarationContext; +import org.elasticsearch.painless.PainlessParser.DecltypeContext; +import org.elasticsearch.painless.PainlessParser.DeclvarContext; +import org.elasticsearch.painless.PainlessParser.DoContext; +import org.elasticsearch.painless.PainlessParser.EmptyscopeContext; +import org.elasticsearch.painless.PainlessParser.ExprContext; +import org.elasticsearch.painless.PainlessParser.ExpressionContext; +import org.elasticsearch.painless.PainlessParser.ForContext; +import org.elasticsearch.painless.PainlessParser.IfContext; +import org.elasticsearch.painless.PainlessParser.InitializerContext; +import org.elasticsearch.painless.PainlessParser.MultipleContext; +import org.elasticsearch.painless.PainlessParser.ReturnContext; +import org.elasticsearch.painless.PainlessParser.SingleContext; +import org.elasticsearch.painless.PainlessParser.SourceContext; +import org.elasticsearch.painless.PainlessParser.StatementContext; +import org.elasticsearch.painless.PainlessParser.ThrowContext; +import org.elasticsearch.painless.PainlessParser.TrapContext; +import org.elasticsearch.painless.PainlessParser.TryContext; +import org.elasticsearch.painless.PainlessParser.WhileContext; +import org.elasticsearch.painless.WriterUtility.Branch; +import org.objectweb.asm.Label; +import org.objectweb.asm.Opcodes; +import org.objectweb.asm.commons.GeneratorAdapter; + +import static org.elasticsearch.painless.WriterConstants.PAINLESS_ERROR_TYPE; + +class WriterStatement { + private final Metadata metadata; + + private final GeneratorAdapter execute; + + private final Writer writer; + private final WriterUtility utility; + + WriterStatement(final Metadata metadata, final GeneratorAdapter execute, + final Writer writer, final WriterUtility utility) { + this.metadata = metadata; + + this.execute = execute; + + this.writer = writer; + this.utility = utility; + } + + void processSource(final SourceContext ctx) { + final StatementMetadata sourcesmd = metadata.getStatementMetadata(ctx); + + for (final StatementContext sctx : ctx.statement()) { + writer.visit(sctx); + } + + if (!sourcesmd.methodEscape) { + execute.visitInsn(Opcodes.ACONST_NULL); + execute.returnValue(); + } + } + + void processIf(final IfContext ctx) { + final ExpressionContext exprctx = ctx.expression(); + final boolean els = ctx.ELSE() != null; + final Branch branch = utility.markBranch(ctx, exprctx); + branch.end = new Label(); + branch.fals = els ? 
new Label() : branch.end; + + writer.visit(exprctx); + + final BlockContext blockctx0 = ctx.block(0); + final StatementMetadata blockmd0 = metadata.getStatementMetadata(blockctx0); + writer.visit(blockctx0); + + if (els) { + if (!blockmd0.allLast) { + execute.goTo(branch.end); + } + + execute.mark(branch.fals); + writer.visit(ctx.block(1)); + } + + execute.mark(branch.end); + } + + void processWhile(final WhileContext ctx) { + final ExpressionContext exprctx = ctx.expression(); + final Branch branch = utility.markBranch(ctx, exprctx); + branch.begin = new Label(); + branch.end = new Label(); + branch.fals = branch.end; + + utility.pushJump(branch); + execute.mark(branch.begin); + writer.visit(exprctx); + + final BlockContext blockctx = ctx.block(); + boolean allLast = false; + + if (blockctx != null) { + final StatementMetadata blocksmd = metadata.getStatementMetadata(blockctx); + allLast = blocksmd.allLast; + writeLoopCounter(blocksmd.count > 0 ? blocksmd.count : 1); + writer.visit(blockctx); + } else if (ctx.empty() != null) { + writeLoopCounter(1); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + + if (!allLast) { + execute.goTo(branch.begin); + } + + execute.mark(branch.end); + utility.popJump(); + } + + void processDo(final DoContext ctx) { + final ExpressionContext exprctx = ctx.expression(); + final Branch branch = utility.markBranch(ctx, exprctx); + Label start = new Label(); + branch.begin = new Label(); + branch.end = new Label(); + branch.fals = branch.end; + + final BlockContext blockctx = ctx.block(); + final StatementMetadata blocksmd = metadata.getStatementMetadata(blockctx); + + utility.pushJump(branch); + execute.mark(start); + writer.visit(blockctx); + execute.mark(branch.begin); + writer.visit(exprctx); + writeLoopCounter(blocksmd.count > 0 ? blocksmd.count : 1); + execute.goTo(start); + execute.mark(branch.end); + utility.popJump(); + } + + void processFor(final ForContext ctx) { + final ExpressionContext exprctx = ctx.expression(); + final AfterthoughtContext atctx = ctx.afterthought(); + final Branch branch = utility.markBranch(ctx, exprctx); + final Label start = new Label(); + branch.begin = atctx == null ? start : new Label(); + branch.end = new Label(); + branch.fals = branch.end; + + utility.pushJump(branch); + + if (ctx.initializer() != null) { + writer.visit(ctx.initializer()); + } + + execute.mark(start); + + if (exprctx != null) { + writer.visit(exprctx); + } + + final BlockContext blockctx = ctx.block(); + boolean allLast = false; + + if (blockctx != null) { + StatementMetadata blocksmd = metadata.getStatementMetadata(blockctx); + allLast = blocksmd.allLast; + + int count = blocksmd.count > 0 ? 
blocksmd.count : 1; + + if (atctx != null) { + ++count; + } + + writeLoopCounter(count); + writer.visit(blockctx); + } else if (ctx.empty() != null) { + writeLoopCounter(1); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + + if (atctx != null) { + execute.mark(branch.begin); + writer.visit(atctx); + } + + if (atctx != null || !allLast) { + execute.goTo(start); + } + + execute.mark(branch.end); + utility.popJump(); + } + + void processDecl(final DeclContext ctx) { + writer.visit(ctx.declaration()); + } + + void processContinue() { + final Branch jump = utility.peekJump(); + execute.goTo(jump.begin); + } + + void processBreak() { + final Branch jump = utility.peekJump(); + execute.goTo(jump.end); + } + + void processReturn(final ReturnContext ctx) { + writer.visit(ctx.expression()); + execute.returnValue(); + } + + void processTry(final TryContext ctx) { + final TrapContext[] trapctxs = new TrapContext[ctx.trap().size()]; + ctx.trap().toArray(trapctxs); + final Branch branch = utility.markBranch(ctx, trapctxs); + + Label end = new Label(); + branch.begin = new Label(); + branch.end = new Label(); + branch.tru = trapctxs.length > 1 ? end : null; + + execute.mark(branch.begin); + + final BlockContext blockctx = ctx.block(); + final StatementMetadata blocksmd = metadata.getStatementMetadata(blockctx); + writer.visit(blockctx); + + if (!blocksmd.allLast) { + execute.goTo(end); + } + + execute.mark(branch.end); + + for (final TrapContext trapctx : trapctxs) { + writer.visit(trapctx); + } + + if (!blocksmd.allLast || trapctxs.length > 1) { + execute.mark(end); + } + } + + void processThrow(final ThrowContext ctx) { + writer.visit(ctx.expression()); + execute.throwException(); + } + + void processExpr(final ExprContext ctx) { + final StatementMetadata exprsmd = metadata.getStatementMetadata(ctx); + final ExpressionContext exprctx = ctx.expression(); + final ExpressionMetadata expremd = metadata.getExpressionMetadata(exprctx); + writer.visit(exprctx); + + if (exprsmd.methodEscape) { + execute.returnValue(); + } else { + utility.writePop(expremd.to.type.getSize()); + } + } + + void processMultiple(final MultipleContext ctx) { + for (final StatementContext sctx : ctx.statement()) { + writer.visit(sctx); + } + } + + void processSingle(final SingleContext ctx) { + writer.visit(ctx.statement()); + } + + void processInitializer(InitializerContext ctx) { + final DeclarationContext declctx = ctx.declaration(); + final ExpressionContext exprctx = ctx.expression(); + + if (declctx != null) { + writer.visit(declctx); + } else if (exprctx != null) { + final ExpressionMetadata expremd = metadata.getExpressionMetadata(exprctx); + writer.visit(exprctx); + utility.writePop(expremd.to.type.getSize()); + } else { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + } + + void processAfterthought(AfterthoughtContext ctx) { + final ExpressionContext exprctx = ctx.expression(); + final ExpressionMetadata expremd = metadata.getExpressionMetadata(exprctx); + writer.visit(ctx.expression()); + utility.writePop(expremd.to.type.getSize()); + } + + void processDeclaration(DeclarationContext ctx) { + for (final DeclvarContext declctx : ctx.declvar()) { + writer.visit(declctx); + } + } + + void processDeclvar(final DeclvarContext ctx) { + final ExpressionMetadata declvaremd = metadata.getExpressionMetadata(ctx); + final org.objectweb.asm.Type type = declvaremd.to.type; + final Sort sort = declvaremd.to.sort; + final int slot = 
(int)declvaremd.postConst; + + final ExpressionContext exprctx = ctx.expression(); + final boolean initialize = exprctx == null; + + if (!initialize) { + writer.visit(exprctx); + } + + switch (sort) { + case VOID: throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + case BOOL: + case BYTE: + case SHORT: + case CHAR: + case INT: if (initialize) execute.push(0); break; + case LONG: if (initialize) execute.push(0L); break; + case FLOAT: if (initialize) execute.push(0.0F); break; + case DOUBLE: if (initialize) execute.push(0.0); break; + default: if (initialize) execute.visitInsn(Opcodes.ACONST_NULL); + } + + execute.visitVarInsn(type.getOpcode(Opcodes.ISTORE), slot); + } + + void processTrap(final TrapContext ctx) { + final StatementMetadata trapsmd = metadata.getStatementMetadata(ctx); + + final Branch branch = utility.getBranch(ctx); + final Label jump = new Label(); + + final BlockContext blockctx = ctx.block(); + final EmptyscopeContext emptyctx = ctx.emptyscope(); + + execute.mark(jump); + execute.visitVarInsn(trapsmd.exception.type.getOpcode(Opcodes.ISTORE), trapsmd.slot); + + if (blockctx != null) { + writer.visit(ctx.block()); + } else if (emptyctx == null) { + throw new IllegalStateException(WriterUtility.error(ctx) + "Unexpected state."); + } + + execute.visitTryCatchBlock(branch.begin, branch.end, jump, trapsmd.exception.type.getInternalName()); + + if (branch.tru != null && !trapsmd.allLast) { + execute.goTo(branch.tru); + } + } + + private void writeLoopCounter(final int count) { + final Label end = new Label(); + + execute.iinc(metadata.loopCounterSlot, -count); + execute.visitVarInsn(Opcodes.ILOAD, metadata.loopCounterSlot); + execute.push(0); + execute.ifICmp(GeneratorAdapter.GT, end); + execute.throwException(PAINLESS_ERROR_TYPE, + "The maximum number of statements that can be executed in a loop has been reached."); + execute.mark(end); + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterUtility.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterUtility.java new file mode 100644 index 00000000000..d7e4f4ce42c --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterUtility.java @@ -0,0 +1,387 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.elasticsearch.painless.Definition.Sort; +import org.elasticsearch.painless.Definition.Type; +import org.objectweb.asm.Label; +import org.objectweb.asm.Opcodes; +import org.objectweb.asm.commons.GeneratorAdapter; + +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.painless.PainlessParser.ADD; +import static org.elasticsearch.painless.PainlessParser.BWAND; +import static org.elasticsearch.painless.PainlessParser.BWOR; +import static org.elasticsearch.painless.PainlessParser.BWXOR; +import static org.elasticsearch.painless.PainlessParser.DIV; +import static org.elasticsearch.painless.PainlessParser.LSH; +import static org.elasticsearch.painless.PainlessParser.MUL; +import static org.elasticsearch.painless.PainlessParser.REM; +import static org.elasticsearch.painless.PainlessParser.RSH; +import static org.elasticsearch.painless.PainlessParser.SUB; +import static org.elasticsearch.painless.PainlessParser.USH; +import static org.elasticsearch.painless.WriterConstants.ADDEXACT_INT; +import static org.elasticsearch.painless.WriterConstants.ADDEXACT_LONG; +import static org.elasticsearch.painless.WriterConstants.ADDWOOVERLOW_DOUBLE; +import static org.elasticsearch.painless.WriterConstants.ADDWOOVERLOW_FLOAT; +import static org.elasticsearch.painless.WriterConstants.DEF_ADD_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_AND_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_DIV_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_LSH_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_MUL_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_OR_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_REM_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_RSH_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_SUB_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_USH_CALL; +import static org.elasticsearch.painless.WriterConstants.DEF_XOR_CALL; +import static org.elasticsearch.painless.WriterConstants.DIVWOOVERLOW_DOUBLE; +import static org.elasticsearch.painless.WriterConstants.DIVWOOVERLOW_FLOAT; +import static org.elasticsearch.painless.WriterConstants.DIVWOOVERLOW_INT; +import static org.elasticsearch.painless.WriterConstants.DIVWOOVERLOW_LONG; +import static org.elasticsearch.painless.WriterConstants.MULEXACT_INT; +import static org.elasticsearch.painless.WriterConstants.MULEXACT_LONG; +import static org.elasticsearch.painless.WriterConstants.MULWOOVERLOW_DOUBLE; +import static org.elasticsearch.painless.WriterConstants.MULWOOVERLOW_FLOAT; +import static org.elasticsearch.painless.WriterConstants.REMWOOVERLOW_DOUBLE; +import static org.elasticsearch.painless.WriterConstants.REMWOOVERLOW_FLOAT; +import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_BOOLEAN; +import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_CHAR; +import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_DOUBLE; +import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_FLOAT; +import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_INT; +import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_LONG; +import 
static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_OBJECT; +import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_STRING; +import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_CONSTRUCTOR; +import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_TOSTRING; +import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_TYPE; +import static org.elasticsearch.painless.WriterConstants.SUBEXACT_INT; +import static org.elasticsearch.painless.WriterConstants.SUBEXACT_LONG; +import static org.elasticsearch.painless.WriterConstants.SUBWOOVERLOW_DOUBLE; +import static org.elasticsearch.painless.WriterConstants.SUBWOOVERLOW_FLOAT; + +class WriterUtility { + static class Branch { + final ParserRuleContext source; + + Label begin = null; + Label end = null; + Label tru = null; + Label fals = null; + + private Branch(final ParserRuleContext source) { + this.source = source; + } + } + + /** + * A utility method to output consistent error messages. + * @param ctx The ANTLR node the error occurred in. + * @return The error message with tacked on line number and character position. + */ + static String error(final ParserRuleContext ctx) { + return "Writer Error [" + ctx.getStart().getLine() + ":" + ctx.getStart().getCharPositionInLine() + "]: "; + } + + private final Definition definition; + private final CompilerSettings settings; + + private final GeneratorAdapter execute; + + private final Map branches = new HashMap<>(); + private final Deque jumps = new ArrayDeque<>(); + private final Set strings = new HashSet<>(); + + WriterUtility(final Metadata metadata, final GeneratorAdapter execute) { + definition = metadata.definition; + settings = metadata.settings; + + this.execute = execute; + } + + Branch markBranch(final ParserRuleContext source, final ParserRuleContext... nodes) { + final Branch branch = new Branch(source); + + for (final ParserRuleContext node : nodes) { + branches.put(node, branch); + } + + return branch; + } + + void copyBranch(final Branch branch, final ParserRuleContext... 
nodes) { + for (final ParserRuleContext node : nodes) { + branches.put(node, branch); + } + } + + Branch getBranch(final ParserRuleContext source) { + return branches.get(source); + } + + void checkWriteBranch(final ParserRuleContext source) { + final Branch branch = getBranch(source); + + if (branch != null) { + if (branch.tru != null) { + execute.visitJumpInsn(Opcodes.IFNE, branch.tru); + } else if (branch.fals != null) { + execute.visitJumpInsn(Opcodes.IFEQ, branch.fals); + } + } + } + + void pushJump(final Branch branch) { + jumps.push(branch); + } + + Branch peekJump() { + return jumps.peek(); + } + + void popJump() { + jumps.pop(); + } + + void addStrings(final ParserRuleContext source) { + strings.add(source); + } + + boolean containsStrings(final ParserRuleContext source) { + return strings.contains(source); + } + + void removeStrings(final ParserRuleContext source) { + strings.remove(source); + } + + void writeDup(final int size, final boolean x1, final boolean x2) { + if (size == 1) { + if (x2) { + execute.dupX2(); + } else if (x1) { + execute.dupX1(); + } else { + execute.dup(); + } + } else if (size == 2) { + if (x2) { + execute.dup2X2(); + } else if (x1) { + execute.dup2X1(); + } else { + execute.dup2(); + } + } + } + + void writePop(final int size) { + if (size == 1) { + execute.pop(); + } else if (size == 2) { + execute.pop2(); + } + } + + void writeConstant(final ParserRuleContext source, final Object constant) { + if (constant instanceof Number) { + writeNumeric(source, constant); + } else if (constant instanceof Character) { + writeNumeric(source, (int)(char)constant); + } else if (constant instanceof String) { + writeString(source, constant); + } else if (constant instanceof Boolean) { + writeBoolean(source, constant); + } else if (constant != null) { + throw new IllegalStateException(WriterUtility.error(source) + "Unexpected state."); + } + } + + void writeNumeric(final ParserRuleContext source, final Object numeric) { + if (numeric instanceof Double) { + execute.push((double)numeric); + } else if (numeric instanceof Float) { + execute.push((float)numeric); + } else if (numeric instanceof Long) { + execute.push((long)numeric); + } else if (numeric instanceof Number) { + execute.push(((Number)numeric).intValue()); + } else { + throw new IllegalStateException(WriterUtility.error(source) + "Unexpected state."); + } + } + + void writeString(final ParserRuleContext source, final Object string) { + if (string instanceof String) { + execute.push((String)string); + } else { + throw new IllegalStateException(WriterUtility.error(source) + "Unexpected state."); + } + } + + void writeBoolean(final ParserRuleContext source, final Object bool) { + if (bool instanceof Boolean) { + execute.push((boolean)bool); + } else { + throw new IllegalStateException(WriterUtility.error(source) + "Unexpected state."); + } + } + + void writeNewStrings() { + execute.newInstance(STRINGBUILDER_TYPE); + execute.dup(); + execute.invokeConstructor(STRINGBUILDER_TYPE, STRINGBUILDER_CONSTRUCTOR); + } + + void writeAppendStrings(final Sort sort) { + switch (sort) { + case BOOL: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_BOOLEAN); break; + case CHAR: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_CHAR); break; + case BYTE: + case SHORT: + case INT: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_INT); break; + case LONG: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_LONG); break; + case FLOAT: execute.invokeVirtual(STRINGBUILDER_TYPE, 
STRINGBUILDER_APPEND_FLOAT); break; + case DOUBLE: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_DOUBLE); break; + case STRING: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_STRING); break; + default: execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_OBJECT); + } + } + + void writeToStrings() { + execute.invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_TOSTRING); + } + + void writeBinaryInstruction(final ParserRuleContext source, final Type type, final int token) { + final Sort sort = type.sort; + final boolean exact = !settings.getNumericOverflow() && + ((sort == Sort.INT || sort == Sort.LONG) && + (token == MUL || token == DIV || token == ADD || token == SUB) || + (sort == Sort.FLOAT || sort == Sort.DOUBLE) && + (token == MUL || token == DIV || token == REM || token == ADD || token == SUB)); + + // If it's a 64-bit shift, fix-up the last argument to truncate to 32-bits. + // Note that unlike java, this means we still do binary promotion of shifts, + // but it keeps things simple, and this check works because we promote shifts. + if (sort == Sort.LONG && (token == LSH || token == USH || token == RSH)) { + execute.cast(org.objectweb.asm.Type.LONG_TYPE, org.objectweb.asm.Type.INT_TYPE); + } + + if (exact) { + switch (sort) { + case INT: + switch (token) { + case MUL: execute.invokeStatic(definition.mathType.type, MULEXACT_INT); break; + case DIV: execute.invokeStatic(definition.utilityType.type, DIVWOOVERLOW_INT); break; + case ADD: execute.invokeStatic(definition.mathType.type, ADDEXACT_INT); break; + case SUB: execute.invokeStatic(definition.mathType.type, SUBEXACT_INT); break; + default: + throw new IllegalStateException(WriterUtility.error(source) + "Unexpected state."); + } + + break; + case LONG: + switch (token) { + case MUL: execute.invokeStatic(definition.mathType.type, MULEXACT_LONG); break; + case DIV: execute.invokeStatic(definition.utilityType.type, DIVWOOVERLOW_LONG); break; + case ADD: execute.invokeStatic(definition.mathType.type, ADDEXACT_LONG); break; + case SUB: execute.invokeStatic(definition.mathType.type, SUBEXACT_LONG); break; + default: + throw new IllegalStateException(WriterUtility.error(source) + "Unexpected state."); + } + + break; + case FLOAT: + switch (token) { + case MUL: execute.invokeStatic(definition.utilityType.type, MULWOOVERLOW_FLOAT); break; + case DIV: execute.invokeStatic(definition.utilityType.type, DIVWOOVERLOW_FLOAT); break; + case REM: execute.invokeStatic(definition.utilityType.type, REMWOOVERLOW_FLOAT); break; + case ADD: execute.invokeStatic(definition.utilityType.type, ADDWOOVERLOW_FLOAT); break; + case SUB: execute.invokeStatic(definition.utilityType.type, SUBWOOVERLOW_FLOAT); break; + default: + throw new IllegalStateException(WriterUtility.error(source) + "Unexpected state."); + } + + break; + case DOUBLE: + switch (token) { + case MUL: execute.invokeStatic(definition.utilityType.type, MULWOOVERLOW_DOUBLE); break; + case DIV: execute.invokeStatic(definition.utilityType.type, DIVWOOVERLOW_DOUBLE); break; + case REM: execute.invokeStatic(definition.utilityType.type, REMWOOVERLOW_DOUBLE); break; + case ADD: execute.invokeStatic(definition.utilityType.type, ADDWOOVERLOW_DOUBLE); break; + case SUB: execute.invokeStatic(definition.utilityType.type, SUBWOOVERLOW_DOUBLE); break; + default: + throw new IllegalStateException(WriterUtility.error(source) + "Unexpected state."); + } + + break; + default: + throw new IllegalStateException(WriterUtility.error(source) + "Unexpected state."); + } + } else { + 
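+ // Non-exact path: impossible float/double shift and bitwise tokens are rejected first, def operands are
+ // dispatched dynamically through the runtime, and everything else lowers to the plain JVM arithmetic
+ // instruction for the type.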
if ((sort == Sort.FLOAT || sort == Sort.DOUBLE) && + (token == LSH || token == USH || token == RSH || token == BWAND || token == BWXOR || token == BWOR)) { + throw new IllegalStateException(WriterUtility.error(source) + "Unexpected state."); + } + + if (sort == Sort.DEF) { + switch (token) { + case MUL: execute.invokeStatic(definition.defobjType.type, DEF_MUL_CALL); break; + case DIV: execute.invokeStatic(definition.defobjType.type, DEF_DIV_CALL); break; + case REM: execute.invokeStatic(definition.defobjType.type, DEF_REM_CALL); break; + case ADD: execute.invokeStatic(definition.defobjType.type, DEF_ADD_CALL); break; + case SUB: execute.invokeStatic(definition.defobjType.type, DEF_SUB_CALL); break; + case LSH: execute.invokeStatic(definition.defobjType.type, DEF_LSH_CALL); break; + case USH: execute.invokeStatic(definition.defobjType.type, DEF_USH_CALL); break; + case RSH: execute.invokeStatic(definition.defobjType.type, DEF_RSH_CALL); break; + case BWAND: execute.invokeStatic(definition.defobjType.type, DEF_AND_CALL); break; + case BWXOR: execute.invokeStatic(definition.defobjType.type, DEF_XOR_CALL); break; + case BWOR: execute.invokeStatic(definition.defobjType.type, DEF_OR_CALL); break; + default: + throw new IllegalStateException(WriterUtility.error(source) + "Unexpected state."); + } + } else { + switch (token) { + case MUL: execute.math(GeneratorAdapter.MUL, type.type); break; + case DIV: execute.math(GeneratorAdapter.DIV, type.type); break; + case REM: execute.math(GeneratorAdapter.REM, type.type); break; + case ADD: execute.math(GeneratorAdapter.ADD, type.type); break; + case SUB: execute.math(GeneratorAdapter.SUB, type.type); break; + case LSH: execute.math(GeneratorAdapter.SHL, type.type); break; + case USH: execute.math(GeneratorAdapter.USHR, type.type); break; + case RSH: execute.math(GeneratorAdapter.SHR, type.type); break; + case BWAND: execute.math(GeneratorAdapter.AND, type.type); break; + case BWXOR: execute.math(GeneratorAdapter.XOR, type.type); break; + case BWOR: execute.math(GeneratorAdapter.OR, type.type); break; + default: + throw new IllegalStateException(WriterUtility.error(source) + "Unexpected state."); + } + } + } + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java index 861c03cd706..2eb0cc5ba78 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -47,6 +47,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.Iterator; @@ -103,7 +104,8 @@ public abstract class AbstractAsyncBulkByScrollAction docs); - protected abstract Response buildResponse(TimeValue took, List indexingFailures, List searchFailures); + protected abstract Response buildResponse(TimeValue took, List indexingFailures, List searchFailures, + boolean timedOut); public void start() { initialSearch(); @@ -161,8 +163,13 @@ public abstract class AbstractAsyncBulkByScrollAction - if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) { - startNormalTermination(emptyList(), unmodifiableList(Arrays.asList(searchResponse.getShardFailures()))); + if ( // If any of the shards failed, that should abort the request. 
+ (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) + // Timeouts aren't shard failures but we still need to pass them back to the user. + || searchResponse.isTimedOut() + ) { + startNormalTermination(emptyList(), unmodifiableList(Arrays.asList(searchResponse.getShardFailures())), + searchResponse.isTimedOut()); return; } long total = searchResponse.getHits().totalHits(); @@ -176,7 +183,7 @@ public abstract class AbstractAsyncBulkByScrollAction= mainRequest.getSize()) { // We've processed all the requested docs. - startNormalTermination(emptyList(), emptyList()); + startNormalTermination(emptyList(), emptyList(), false); return; } startNextScroll(); @@ -311,9 +318,9 @@ public abstract class AbstractAsyncBulkByScrollAction indexingFailures, List searchFailures) { - if (false == mainRequest.isRefresh()) { - finishHim(null, indexingFailures, searchFailures); + void startNormalTermination(List indexingFailures, List searchFailures, boolean timedOut) { + if (task.isCancelled() || false == mainRequest.isRefresh()) { + finishHim(null, indexingFailures, searchFailures, timedOut); return; } RefreshRequest refresh = new RefreshRequest(); @@ -321,7 +328,7 @@ public abstract class AbstractAsyncBulkByScrollAction() { @Override public void onResponse(RefreshResponse response) { - finishHim(null, indexingFailures, searchFailures); + finishHim(null, indexingFailures, searchFailures, timedOut); } @Override @@ -337,7 +344,7 @@ public abstract class AbstractAsyncBulkByScrollAction indexingFailures, List searchFailures) { + void finishHim(Throwable failure, List indexingFailures, List searchFailures, boolean timedOut) { String scrollId = scroll.get(); if (Strings.hasLength(scrollId)) { /* @@ -369,7 +377,8 @@ public abstract class AbstractAsyncBulkByScrollAction indices) { + destinationIndices.addAll(indices); + } + /** * Wraps a backoffPolicy in another policy that counts the number of backoffs acquired. 
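+ * Each backoff acquired corresponds to one retried bulk request, so the count doubles as a retry counter.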
+ */ diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java index 6f50b216c9b..f3dbfb60c60 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.indices.query.IndicesQueriesRegistry; @@ -33,6 +33,7 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.AggregatorParsers; +import org.elasticsearch.search.suggest.Suggesters; import org.elasticsearch.tasks.LoggingTaskListener; import org.elasticsearch.tasks.Task; @@ -42,14 +43,17 @@ public abstract class AbstractBaseReindexRestHandler> extends BaseRestHandler { protected final IndicesQueriesRegistry indicesQueriesRegistry; protected final AggregatorParsers aggParsers; + protected final Suggesters suggesters; private final ClusterService clusterService; private final TA action; protected AbstractBaseReindexRestHandler(Settings settings, Client client, - IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, ClusterService clusterService, TA action) { + IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, Suggesters suggesters, + ClusterService clusterService, TA action) { super(settings, client); this.indicesQueriesRegistry = indicesQueriesRegistry; this.aggParsers = aggParsers; + this.suggesters = suggesters; this.clusterService = clusterService; this.action = action; } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java index ca1a53ef999..60de9bfbd03 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponse.java @@ -45,16 +45,18 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont private BulkByScrollTask.Status status; private List indexingFailures; private List searchFailures; + private boolean timedOut; public BulkIndexByScrollResponse() { } public BulkIndexByScrollResponse(TimeValue took, BulkByScrollTask.Status status, List indexingFailures, - List searchFailures) { + List searchFailures, boolean timedOut) { this.took = took; this.status = requireNonNull(status, "Null status not supported"); this.indexingFailures = indexingFailures; this.searchFailures = searchFailures; + this.timedOut = timedOut; } public TimeValue getTook() { @@ -103,6 +105,13 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont return searchFailures; } + /** + * Did any of the sub-requests that were part of this request time out? 
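+ * A minimal, hypothetical caller-side check:
+ * <pre>
+ * BulkIndexByScrollResponse response = ...; // however the caller obtained it
+ * if (response.isTimedOut()) {
+ *     // the search phase timed out, so the counts in the status may be partial
+ * }
+ * </pre>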
+ */ + public boolean isTimedOut() { + return timedOut; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -116,6 +125,7 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont for (ShardSearchFailure failure: searchFailures) { failure.writeTo(out); } + out.writeBoolean(timedOut); } @Override @@ -135,11 +145,13 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont searchFailures.add(readShardSearchFailure(in)); } this.searchFailures = unmodifiableList(searchFailures); + this.timedOut = in.readBoolean(); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field("took", took.millis()); + builder.field("timed_out", timedOut); status.innerXContent(builder, params, false, false); builder.startArray("failures"); for (Failure failure: indexingFailures) { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseContentListener.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseContentListener.java index 24fdb16b397..24612aa14de 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseContentListener.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseContentListener.java @@ -19,7 +19,9 @@ package org.elasticsearch.index.reindex; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.support.RestToXContentListener; @@ -35,12 +37,25 @@ public class BulkIndexByScrollResponseContentListener status.getStatus()) { status = failure.getStatus(); } } + for (ShardSearchFailure failure: response.getSearchFailures()) { + RestStatus failureStatus = ExceptionsHelper.status(failure.getCause()); + if (failureStatus.getStatus() > status.getStatus()) { + status = failureStatus; + } + } return status; } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java index a01c6e3b30e..9ab025a2527 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java @@ -41,8 +41,9 @@ public class ReindexPlugin extends Plugin { actionModule.registerAction(UpdateByQueryAction.INSTANCE, TransportUpdateByQueryAction.class); } - public void onModule(NetworkModule restModule) { - restModule.registerRestHandler(RestReindexAction.class); - restModule.registerRestHandler(RestUpdateByQueryAction.class); + public void onModule(NetworkModule networkModule) { + networkModule.registerRestHandler(RestReindexAction.class); + networkModule.registerRestHandler(RestUpdateByQueryAction.class); + networkModule.registerTaskStatus(BulkByScrollTask.Status.PROTOTYPE); } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java index 1ac6117d02b..d51fb7e8bc1 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java +++ 
b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java @@ -19,19 +19,31 @@ package org.elasticsearch.index.reindex; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; -import java.io.IOException; +import static java.util.Collections.unmodifiableList; import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.index.VersionType.INTERNAL; -public class ReindexRequest extends AbstractBulkIndexByScrollRequest { +/** + * Request to reindex some documents from one index to another. This implements CompositeIndicesRequest but in a misleading way. Rather than + * returning all the subrequests that it will make, it tries to return a representative set of subrequests. This is best-effort for a bunch + * of reasons, not least of which is that scripts are allowed to change the destination request in drastic ways, including changing the index + * to which documents are written. + */ +public class ReindexRequest extends AbstractBulkIndexByScrollRequest implements CompositeIndicesRequest { /** * Prototype for index requests. */ @@ -123,4 +135,20 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest { + /** + * The sub-requests that make up this request. This method is not + * accurate since it returns a prototype {@link IndexRequest} and not the actual requests that will be issued as part of the + * execution of this request. Additionally, scripts can modify the underlying {@link IndexRequest} and change values such as the index, + * type, or {@link org.elasticsearch.action.support.IndicesOptions}. In short - only use this for very coarse reasoning about the request. 
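+ * A sketch of the intended use, with {@code checkIndices} standing in for whatever the caller does per sub-request:
+ * <pre>
+ * for (IndicesRequest sub : reindexRequest.subRequests()) {
+ *     checkIndices(sub.indices());
+ * }
+ * </pre>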
+ * + * @return a list comprising of the {@link SearchRequest} and the prototype {@link IndexRequest} + */ + @Override + public List subRequests() { + assert getSearchRequest() != null; + assert getDestination() != null; + return unmodifiableList(Arrays.asList(getSearchRequest(), getDestination())); + } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexResponse.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexResponse.java index a4aee0c00d3..7e74fe26ec2 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexResponse.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexResponse.java @@ -35,8 +35,9 @@ public class ReindexResponse extends BulkIndexByScrollResponse { public ReindexResponse() { } - public ReindexResponse(TimeValue took, Status status, List indexingFailures, List searchFailures) { - super(took, status, indexingFailures, searchFailures); + public ReindexResponse(TimeValue took, Status status, List indexingFailures, List searchFailures, + boolean timedOut) { + super(took, status, indexingFailures, searchFailures, timedOut); } public long getCreated() { @@ -46,6 +47,7 @@ public class ReindexResponse extends BulkIndexByScrollResponse { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field("took", getTook()); + builder.field("timed_out", isTimedOut()); getStatus().innerXContent(builder, params, true, false); builder.startArray("failures"); for (Failure failure: getIndexingFailures()) { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index 1130dc7beab..f78d4e282d3 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.inject.Inject; @@ -43,6 +43,7 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregatorParsers; +import org.elasticsearch.search.suggest.Suggesters; import java.io.IOException; import java.util.List; @@ -76,7 +77,7 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler destParser = new ObjectParser<>("dest"); @@ -102,9 +103,9 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler { @Inject public RestUpdateByQueryAction(Settings settings, RestController controller, Client client, - IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, ClusterService clusterService, - TransportUpdateByQueryAction action) { - super(settings, client, indicesQueriesRegistry, aggParsers, clusterService, action); + IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers, Suggesters suggesters, + ClusterService clusterService, TransportUpdateByQueryAction action) { + super(settings, client, 
indicesQueriesRegistry, aggParsers, suggesters, clusterService, action); controller.registerHandler(POST, "/{index}/_update_by_query", this); controller.registerHandler(POST, "/{index}/{type}/_update_by_query", this); } @@ -96,7 +97,7 @@ public class RestUpdateByQueryAction extends } } RestSearchAction.parseSearchRequest(internalRequest.getSearchRequest(), indicesQueriesRegistry, request, - parseFieldMatcher, aggParsers, bodyContent); + parseFieldMatcher, aggParsers, suggesters, bodyContent); String conflicts = request.param("conflicts"); if (conflicts != null) { @@ -107,7 +108,10 @@ public class RestUpdateByQueryAction extends internalRequest.setSize(internalRequest.getSearchRequest().source().size()); internalRequest.setPipeline(request.param("pipeline")); internalRequest.getSearchRequest().source().size(request.paramAsInt("scroll_size", scrollSize)); - + // Let the requester set search timeout. It is probably only going to be useful for testing but who knows. + if (request.hasParam("search_timeout")) { + internalRequest.getSearchRequest().source().timeout(request.paramAsTime("search_timeout", null)); + } execute(request, internalRequest, channel); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java index 069ee032f8e..482f101653d 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java @@ -29,9 +29,9 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.uid.Versions; @@ -96,9 +96,9 @@ public class TransportReindexAction extends HandledTransportAction indexingFailures, List searchFailures) { - return new ReindexResponse(took, task.getStatus(), indexingFailures, searchFailures); + protected ReindexResponse buildResponse(TimeValue took, List indexingFailures, List searchFailures, + boolean timedOut) { + return new ReindexResponse(took, task.getStatus(), indexingFailures, searchFailures, timedOut); } /* diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java index 0e13c6718dd..d004e86ac0c 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java @@ -96,8 +96,8 @@ public class TransportUpdateByQueryAction extends HandledTransportAction indexingFailures, - List searchFailures) { - return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures); + List searchFailures, boolean timedOut) { + return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures, timedOut); } @Override diff --git 
a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java index b2775393877..915921d6077 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java @@ -19,13 +19,23 @@ package org.elasticsearch.index.reindex; +import java.util.ArrayList; +import java.util.List; + +import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; +import static java.util.Collections.unmodifiableList; + /** - * Request to reindex a set of documents where they are without changing their - * locations or IDs. + * Request to update some documents. That means you can't change their type, id, index, or anything like that. This implements + * CompositeIndicesRequest but in a misleading way. Rather than returning all the subrequests that it will make, it tries to return a + * representative set of subrequests. This is best-effort but better than {@linkplain ReindexRequest} because scripts can't change the + * destination index or other properties of the destination requests. */ -public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest { +public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest implements CompositeIndicesRequest { /** * Ingest pipeline to set on index requests made by this action. */ @@ -64,4 +74,26 @@ public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest { + /** + * The sub-requests that make up this request. This method is not + * accurate since it returns dummy {@link IndexRequest}s and not the actual requests that will be issued as part of the + * execution of this request. + * + * @return a list comprising the {@link SearchRequest} and dummy {@link IndexRequest}s + */ + @Override + public List subRequests() { + assert getSearchRequest() != null; + List subRequests = new ArrayList<>(); + // One dummy IndexRequest per destination index. 
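+ // Update-by-query writes each hit back to the index it came from, so the search indices are the closest
+ // available stand-in for the destination indices.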
+ for (String index : getSearchRequest().indices()) { + IndexRequest request = new IndexRequest(); + request.index(index); + subRequests.add(request); + } + subRequests.add(getSearchRequest()); + return unmodifiableList(subRequests); + } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index ae05f3270df..a4e9c42a33e 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; @@ -74,10 +75,12 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; +import static java.util.Collections.singleton; import static org.apache.lucene.util.TestUtil.randomSimpleString; import static org.elasticsearch.action.bulk.BackoffPolicy.constantBackoff; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; @@ -248,15 +251,33 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { */ public void testShardFailuresAbortRequest() throws Exception { ShardSearchFailure shardFailure = new ShardSearchFailure(new RuntimeException("test")); - new DummyAbstractAsyncBulkByScrollAction() - .onScrollResponse(new SearchResponse(null, scrollId(), 5, 4, randomLong(), new ShardSearchFailure[] { shardFailure })); + InternalSearchResponse internalResponse = new InternalSearchResponse(null, null, null, null, false, null); + new DummyAbstractAsyncBulkByScrollAction().onScrollResponse( + new SearchResponse(internalResponse, scrollId(), 5, 4, randomLong(), new ShardSearchFailure[] { shardFailure })); BulkIndexByScrollResponse response = listener.get(); assertThat(response.getIndexingFailures(), emptyCollectionOf(Failure.class)); assertThat(response.getSearchFailures(), contains(shardFailure)); + assertFalse(response.isTimedOut()); assertNull(response.getReasonCancelled()); assertThat(client.scrollsCleared, contains(scrollId)); } + /** + * Mimics search timeouts. 
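+ * A scroll response that has {@code timedOut=true} but no shard failures should still stop the request
+ * and mark the resulting response as timed out.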
+ */ + public void testSearchTimeoutsAbortRequest() throws Exception { + InternalSearchResponse internalResponse = new InternalSearchResponse(null, null, null, null, true, null); + new DummyAbstractAsyncBulkByScrollAction() + .onScrollResponse(new SearchResponse(internalResponse, scrollId(), 5, 4, randomLong(), new ShardSearchFailure[0])); + BulkIndexByScrollResponse response = listener.get(); + assertThat(response.getIndexingFailures(), emptyCollectionOf(Failure.class)); + assertThat(response.getSearchFailures(), emptyCollectionOf(ShardSearchFailure.class)); + assertTrue(response.isTimedOut()); + assertNull(response.getReasonCancelled()); + assertThat(client.scrollsCleared, contains(scrollId)); + } + + /** * Mimicks bulk indexing failures. */ @@ -370,6 +391,32 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { assertEquals(defaultBackoffBeforeFailing, millis); } + public void testRefreshIsFalseByDefault() throws Exception { + refreshTestCase(null, false); + } + + public void testRefreshFalseDoesntMakeVisible() throws Exception { + refreshTestCase(false, false); + } + + public void testRefreshTrueMakesVisible() throws Exception { + refreshTestCase(true, true); + } + + private void refreshTestCase(Boolean refresh, boolean shouldRefresh) { + if (refresh != null) { + mainRequest.setRefresh(refresh); + } + DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction(); + action.addDestinationIndices(singleton("foo")); + action.startNormalTermination(emptyList(), emptyList(), false); + if (shouldRefresh) { + assertArrayEquals(new String[] {"foo"}, client.lastRefreshRequest.get().indices()); + } else { + assertNull("No refresh was attempted", client.lastRefreshRequest.get()); + } + } + public void testCancelBeforeInitialSearch() throws Exception { cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.initialSearch()); } @@ -396,8 +443,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { public void testCancelBeforeStartNormalTermination() throws Exception { // Refresh or not doesn't matter - we don't try to refresh. mainRequest.setRefresh(usually()); - cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.startNormalTermination(emptyList(), emptyList())); - // This wouldn't return if we called refresh - the action would hang waiting for the refresh that we haven't mocked. 
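+ // Cancelled tasks now skip the refresh step entirely, so this returns promptly even when refresh is set.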
+ cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.startNormalTermination(emptyList(), emptyList(), false)); + assertNull("No refresh was attempted", client.lastRefreshRequest.get()); } private void cancelTaskCase(Consumer testMe) throws Exception { @@ -430,8 +477,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { @Override protected BulkIndexByScrollResponse buildResponse(TimeValue took, List indexingFailures, - List searchFailures) { - return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures); + List searchFailures, boolean timedOut) { + return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures, timedOut); } } @@ -445,6 +492,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { private static class MyMockClient extends FilterClient { private final List scrollsCleared = new ArrayList<>(); private final AtomicInteger bulksAttempts = new AtomicInteger(); + private final AtomicReference lastRefreshRequest = new AtomicReference<>(); private int bulksToReject = 0; @@ -457,6 +505,11 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { protected , Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder> void doExecute( Action action, Request request, ActionListener listener) { + if (request instanceof RefreshRequest) { + lastRefreshRequest.set((RefreshRequest) request); + listener.onResponse(null); + return; + } if (request instanceof ClearScrollRequest) { ClearScrollRequest clearScroll = (ClearScrollRequest) request; scrollsCleared.addAll(clearScroll.getScrollIds()); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTestUtils.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTestUtils.java index d1f6b1ee171..5117d2781be 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTestUtils.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTestUtils.java @@ -21,7 +21,10 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ListenableActionFuture; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo; +import org.elasticsearch.index.reindex.BulkByScrollTask.Status; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.NativeScriptFactory; @@ -41,7 +44,10 @@ import java.util.concurrent.TimeoutException; import static java.util.Collections.emptyMap; import static org.elasticsearch.test.ESIntegTestCase.client; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertThat; /** @@ -76,10 +82,26 @@ public class CancelTestUtils { // Wait until the script is on the second document. 
barrier.await(30, TimeUnit.SECONDS); + // Status should show running + ListTasksResponse tasksList = client().admin().cluster().prepareListTasks().setActions(actionToCancel).setDetailed(true).get(); + assertThat(tasksList.getNodeFailures(), empty()); + assertThat(tasksList.getTaskFailures(), empty()); + assertThat(tasksList.getTasks(), hasSize(1)); + BulkByScrollTask.Status status = (Status) tasksList.getTasks().get(0).getStatus(); + assertNull(status.getReasonCancelled()); + // Cancel the request while the script is running. This will prevent the request from being sent at all. List cancelledTasks = client().admin().cluster().prepareCancelTasks().setActions(actionToCancel).get().getTasks(); assertThat(cancelledTasks, hasSize(1)); + // The status should now show canceled. The request will still be in the list because the script is still blocked. + tasksList = client().admin().cluster().prepareListTasks().setActions(actionToCancel).setDetailed(true).get(); + assertThat(tasksList.getNodeFailures(), empty()); + assertThat(tasksList.getTaskFailures(), empty()); + assertThat(tasksList.getTasks(), hasSize(1)); + status = (Status) tasksList.getTasks().get(0).getStatus(); + assertEquals(CancelTasksRequest.DEFAULT_REASON, status.getReasonCancelled()); + // Now let the next document through. It won't be sent because the request is cancelled but we need to unblock the script. barrier.await(); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexBasicTests.java index 83dcd1483c1..c169f6819ea 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexBasicTests.java @@ -19,14 +19,12 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import java.util.ArrayList; import java.util.List; import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; public class ReindexBasicTests extends ReindexTestCase { @@ -84,40 +82,4 @@ public class ReindexBasicTests extends ReindexTestCase { assertThat(copy.get(), responseMatcher().created(half).batches(half, 5)); assertHitCount(client().prepareSearch("dest").setTypes("half").setSize(0).get(), half); } - - public void testRefreshIsFalseByDefault() throws Exception { - refreshTestCase(null, false); - } - - public void testRefreshFalseDoesntMakeVisible() throws Exception { - refreshTestCase(false, false); - } - - public void testRefreshTrueMakesVisible() throws Exception { - refreshTestCase(true, true); - } - - /** - * Executes a reindex into an index with -1 refresh_interval and checks that - * the documents are visible properly. 
- */ - private void refreshTestCase(Boolean refresh, boolean visible) throws Exception { - CreateIndexRequestBuilder create = client().admin().indices().prepareCreate("dest").setSettings("refresh_interval", -1); - assertAcked(create); - ensureYellow(); - indexRandom(true, client().prepareIndex("source", "test", "1").setSource("foo", "a"), - client().prepareIndex("source", "test", "2").setSource("foo", "a"), - client().prepareIndex("source", "test", "3").setSource("foo", "b"), - client().prepareIndex("source", "test", "4").setSource("foo", "c")); - assertHitCount(client().prepareSearch("source").setSize(0).get(), 4); - - // Copy all the docs - ReindexRequestBuilder copy = reindex().source("source").destination("dest", "all"); - if (refresh != null) { - copy.refresh(refresh); - } - assertThat(copy.get(), responseMatcher().created(4)); - - assertHitCount(client().prepareSearch("dest").setTypes("all").setSize(0).get(), visible ? 4 : 0); - } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java index f5c31fe8f42..6e1cbb59e86 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java @@ -102,7 +102,7 @@ public class RoundTripTests extends ESTestCase { public void testReindexResponse() throws IOException { ReindexResponse response = new ReindexResponse(timeValueMillis(randomPositiveLong()), randomStatus(), randomIndexingFailures(), - randomSearchFailures()); + randomSearchFailures(), randomBoolean()); ReindexResponse tripped = new ReindexResponse(); roundTrip(response, tripped); assertResponseEquals(response, tripped); @@ -110,7 +110,7 @@ public class RoundTripTests extends ESTestCase { public void testBulkIndexByScrollResponse() throws IOException { BulkIndexByScrollResponse response = new BulkIndexByScrollResponse(timeValueMillis(randomPositiveLong()), randomStatus(), - randomIndexingFailures(), randomSearchFailures()); + randomIndexingFailures(), randomSearchFailures(), randomBoolean()); BulkIndexByScrollResponse tripped = new BulkIndexByScrollResponse(); roundTrip(response, tripped); assertResponseEquals(response, tripped); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java index e49afafca46..096967149fb 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryBasicTests.java @@ -19,12 +19,9 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.search.sort.SortOrder; -import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; public class UpdateByQueryBasicTests extends UpdateByQueryTestCase { @@ -64,44 +61,4 @@ public class UpdateByQueryBasicTests extends UpdateByQueryTestCase { assertEquals(3, client().prepareGet("test", "test", "3").get().getVersion()); assertEquals(2, client().prepareGet("test", "test", "4").get().getVersion()); } - - 
public void testRefreshIsFalseByDefault() throws Exception { - refreshTestCase(null, false); - } - - public void testRefreshFalseDoesntMakeVisible() throws Exception { - refreshTestCase(false, false); - } - - public void testRefreshTrueMakesVisible() throws Exception { - refreshTestCase(true, true); - } - - /** - * Executes an update_by_query on an index with -1 refresh_interval and - * checks that the documents are visible properly. - */ - private void refreshTestCase(Boolean refresh, boolean visible) throws Exception { - CreateIndexRequestBuilder create = client().admin().indices().prepareCreate("test").setSettings("refresh_interval", -1); - create.addMapping("test", "{\"dynamic\": \"false\"}"); - assertAcked(create); - ensureYellow(); - indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "a"), - client().prepareIndex("test", "test", "2").setSource("foo", "a"), - client().prepareIndex("test", "test", "3").setSource("foo", "b"), - client().prepareIndex("test", "test", "4").setSource("foo", "c")); - assertHitCount(client().prepareSearch("test").setQuery(matchQuery("foo", "a")).setSize(0).get(), 0); - - // Now make foo searchable - assertAcked(client().admin().indices().preparePutMapping("test").setType("test") - .setSource("{\"test\": {\"properties\":{\"foo\": {\"type\": \"text\"}}}}")); - UpdateByQueryRequestBuilder update = request().source("test"); - if (refresh != null) { - update.refresh(refresh); - } - assertThat(update.get(), responseMatcher().updated(4)); - - assertHitCount(client().prepareSearch("test").setQuery(matchQuery("foo", "a")).setSize(0).get(), visible ? 2 : 0); - } - } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java new file mode 100644 index 00000000000..f6780729143 --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import java.util.List; + +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.test.ESTestCase; + +import static org.apache.lucene.util.TestUtil.randomSimpleString; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.sameInstance; + +public class UpdateByQueryRequestTests extends ESTestCase { + public void testUpdateByQueryRequestImplementsCompositeIndicesRequestWithDummies() { + int numIndices = between(1, 100); + String[] indices = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indices[i] = randomSimpleString(random(), 1, 30); + } + UpdateByQueryRequest request = new UpdateByQueryRequest(new SearchRequest(indices)); + List<? extends IndicesRequest> subRequests = request.subRequests(); + assertThat(subRequests, hasSize(numIndices + 1)); + for (int i = 0; i < numIndices; i++) { + assertThat(subRequests.get(i).indices(), arrayWithSize(1)); + assertEquals(indices[i], subRequests.get(i).indices()[0]); + } + assertThat(subRequests.get(numIndices), sameInstance(request.getSearchRequest())); + } +} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml index c23e5da95a1..31e97967af0 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml @@ -58,9 +58,6 @@ --- "wait_for_completion=false": - - skip: - version: "0.0.0 - " - reason: breaks other tests by leaving a running reindex behind - do: index: index: source @@ -78,7 +75,9 @@ index: source dest: index: dest + - is_false: timed_out - match: {task: '/.+:\d+/'} + - set: {task: task} - is_false: updated - is_false: version_conflicts - is_false: batches @@ -87,6 +86,11 @@ - is_false: took - is_false: created + - do: + tasks.list: + wait_for_completion: true + task_id: $task + --- "Response format for version conflict": - do: @@ -122,7 +126,8 @@ - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} - - match: {failures.0.cause.reason: "[foo][1]: version conflict, document already exists (current version [1])"} + # Use a regex because the version in the message isn't always 1. Sometimes it comes out 2.
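+ # (The enclosing slashes below make the test runner treat the expected value as a regular expression rather than a literal string.)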
+ - match: {failures.0.cause.reason: "/\\[foo\\]\\[1\\]:.version.conflict,.document.already.exists.\\(current.version.\\[\\d+\\]\\)/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: dest} - is_true: took diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update-by-query/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/update-by-query/10_basic.yaml index 383e945bbf2..65db8a5e66f 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update-by-query/10_basic.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update-by-query/10_basic.yaml @@ -12,6 +12,7 @@ - do: update-by-query: index: test + - is_false: timed_out - match: {updated: 1} - match: {version_conflicts: 0} - match: {batches: 1} @@ -37,6 +38,7 @@ wait_for_completion: false index: test - match: {task: '/.+:\d+/'} + - set: {task: task} - is_false: updated - is_false: version_conflicts - is_false: batches @@ -45,6 +47,11 @@ - is_false: took - is_false: created + - do: + tasks.list: + wait_for_completion: true + task_id: $task + --- "Response for version conflict": - do: @@ -80,7 +87,8 @@ - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} - - match: {failures.0.cause.reason: "[foo][1]: version conflict, current version [2] is different than the one provided [1]"} + # Use a regex because the exact current and provided versions aren't predictable. + - match: {failures.0.cause.reason: "/\\[foo\\]\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: test} - is_true: took diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.5.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.5.0.jar.sha1 deleted file mode 100644 index 18440dcdc04..00000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -69a6e72d322b6643f1b419e6c9cc46623a2404e9 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.0.0-snapshot-f0aa4fc.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..f2e307d5d98 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +1378905632ff45a9887b267c4b30f7adef415ca4 \ No newline at end of file diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java index 6ecdf3888e9..f145ad4ae30 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.analysis; import com.ibm.icu.text.Transliterator; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.icu.ICUTransformFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -36,7 +35,6 @@ public class IcuTransformTokenFilterFactory extends AbstractTokenFilterFactory { private final int dir; private
final Transliterator transliterator; - @Inject public IcuTransformTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.id = settings.get("id", "Null"); diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/AnalysisTestUtils.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/AnalysisTestUtils.java deleted file mode 100644 index a952c8982cc..00000000000 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/AnalysisTestUtils.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.analysis; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.inject.Injector; -import org.elasticsearch.common.inject.ModulesBuilder; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.EnvironmentModule; -import org.elasticsearch.index.Index; -import org.elasticsearch.indices.analysis.AnalysisModule; -import org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin; -import org.elasticsearch.test.IndexSettingsModule; - -import java.io.IOException; - -import static org.elasticsearch.common.settings.Settings.settingsBuilder; - -public class AnalysisTestUtils { - - public static AnalysisService createAnalysisService(Settings settings) throws IOException { - Index index = new Index("test", "_na_"); - Settings indexSettings = settingsBuilder().put(settings) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build(); - AnalysisModule analysisModule = new AnalysisModule(new Environment(settings)); - new AnalysisICUPlugin().onModule(analysisModule); - Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), - new EnvironmentModule(new Environment(settings)), analysisModule) - .createInjector(); - final AnalysisService analysisService = parentInjector.getInstance(AnalysisRegistry.class).build(IndexSettingsModule.newIndexSettings(index, indexSettings)); - return analysisService; - } -} diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IndexableBinaryStringToolsTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IndexableBinaryStringToolsTests.java index 24890fed5a9..5f3e1644481 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IndexableBinaryStringToolsTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IndexableBinaryStringToolsTests.java @@ -23,7 +23,6 @@ import 
com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.RamUsageEstimator; import org.apache.lucene.util.TimeUnits; import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; import org.junit.BeforeClass; @@ -110,14 +109,14 @@ public class IndexableBinaryStringToolsTests extends LuceneTestCase { int encodedLen1 = IndexableBinaryStringTools.getEncodedLength( originalArray1, 0, numBytes1); if (encodedLen1 > encoded1.length) - encoded1 = new char[ArrayUtil.oversize(encodedLen1, RamUsageEstimator.NUM_BYTES_CHAR)]; + encoded1 = new char[ArrayUtil.oversize(encodedLen1, Character.BYTES)]; IndexableBinaryStringTools.encode(originalArray1, 0, numBytes1, encoded1, 0, encodedLen1); int encodedLen2 = IndexableBinaryStringTools.getEncodedLength(original2, 0, numBytes2); if (encodedLen2 > encoded2.length) - encoded2 = new char[ArrayUtil.oversize(encodedLen2, RamUsageEstimator.NUM_BYTES_CHAR)]; + encoded2 = new char[ArrayUtil.oversize(encodedLen2, Character.BYTES)]; IndexableBinaryStringTools.encode(original2, 0, numBytes2, encoded2, 0, encodedLen2); @@ -196,7 +195,7 @@ public class IndexableBinaryStringToolsTests extends LuceneTestCase { int encodedLen = IndexableBinaryStringTools.getEncodedLength(binary, 0, numBytes); if (encoded.length < encodedLen) - encoded = new char[ArrayUtil.oversize(encodedLen, RamUsageEstimator.NUM_BYTES_CHAR)]; + encoded = new char[ArrayUtil.oversize(encodedLen, Character.BYTES)]; IndexableBinaryStringTools.encode(binary, 0, numBytes, encoded, 0, encodedLen); diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuAnalysisTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuAnalysisTests.java index efd60427e23..b399dfd34f4 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuAnalysisTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuAnalysisTests.java @@ -20,21 +20,19 @@ package org.elasticsearch.index.analysis; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; +import org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin; import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.elasticsearch.index.analysis.AnalysisTestUtils.createAnalysisService; import static org.hamcrest.Matchers.instanceOf; /** */ public class SimpleIcuAnalysisTests extends ESTestCase { public void testDefaultsIcuAnalysis() throws IOException { - Settings settings = settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); - AnalysisService analysisService = createAnalysisService(settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), + Settings.EMPTY, new AnalysisICUPlugin()::onModule); TokenizerFactory tokenizerFactory = analysisService.tokenizer("icu_tokenizer"); assertThat(tokenizerFactory, instanceOf(IcuTokenizerFactory.class)); diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuCollationTokenFilterTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuCollationTokenFilterTests.java index 632f3f539d6..adf1faaf92f 
100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuCollationTokenFilterTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuCollationTokenFilterTests.java @@ -27,13 +27,13 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; +import org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin; import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.io.StringReader; -import static org.elasticsearch.index.analysis.AnalysisTestUtils.createAnalysisService; import static org.hamcrest.Matchers.equalTo; // Tests borrowed from Solr's Icu collation key filter factory test. @@ -46,12 +46,11 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase { */ public void testBasicUsage() throws Exception { Settings settings = Settings.settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("index.analysis.filter.myCollator.type", "icu_collation") .put("index.analysis.filter.myCollator.language", "tr") .put("index.analysis.filter.myCollator.strength", "primary") .build(); - AnalysisService analysisService = createAnalysisService(settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin()::onModule); TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator"); assertCollatesToSame(filterFactory, "I WİLL USE TURKİSH CASING", "ı will use turkish casıng"); @@ -62,13 +61,12 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase { */ public void testNormalization() throws IOException { Settings settings = Settings.settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("index.analysis.filter.myCollator.type", "icu_collation") .put("index.analysis.filter.myCollator.language", "tr") .put("index.analysis.filter.myCollator.strength", "primary") .put("index.analysis.filter.myCollator.decomposition", "canonical") .build(); - AnalysisService analysisService = createAnalysisService(settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin()::onModule); TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator"); assertCollatesToSame(filterFactory, "I W\u0049\u0307LL USE TURKİSH CASING", "ı will use turkish casıng"); @@ -79,13 +77,12 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase { */ public void testSecondaryStrength() throws IOException { Settings settings = Settings.settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("index.analysis.filter.myCollator.type", "icu_collation") .put("index.analysis.filter.myCollator.language", "en") .put("index.analysis.filter.myCollator.strength", "secondary") .put("index.analysis.filter.myCollator.decomposition", "no") .build(); - AnalysisService analysisService = createAnalysisService(settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin()::onModule); TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator"); assertCollatesToSame(filterFactory, "TESTING", "testing"); @@ -97,13 +94,12 @@ public class 
SimpleIcuCollationTokenFilterTests extends ESTestCase { */ public void testIgnorePunctuation() throws IOException { Settings settings = Settings.settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("index.analysis.filter.myCollator.type", "icu_collation") .put("index.analysis.filter.myCollator.language", "en") .put("index.analysis.filter.myCollator.strength", "primary") .put("index.analysis.filter.myCollator.alternate", "shifted") .build(); - AnalysisService analysisService = createAnalysisService(settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin()::onModule); TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator"); assertCollatesToSame(filterFactory, "foo-bar", "foo bar"); @@ -115,14 +111,13 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase { */ public void testIgnoreWhitespace() throws IOException { Settings settings = Settings.settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("index.analysis.filter.myCollator.type", "icu_collation") .put("index.analysis.filter.myCollator.language", "en") .put("index.analysis.filter.myCollator.strength", "primary") .put("index.analysis.filter.myCollator.alternate", "shifted") .put("index.analysis.filter.myCollator.variableTop", " ") .build(); - AnalysisService analysisService = createAnalysisService(settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin()::onModule); TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator"); assertCollatesToSame(filterFactory, "foo bar", "foobar"); @@ -136,12 +131,11 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase { */ public void testNumerics() throws IOException { Settings settings = Settings.settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("index.analysis.filter.myCollator.type", "icu_collation") .put("index.analysis.filter.myCollator.language", "en") .put("index.analysis.filter.myCollator.numeric", "true") .build(); - AnalysisService analysisService = createAnalysisService(settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin()::onModule); TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator"); assertCollation(filterFactory, "foobar-9", "foobar-10", -1); @@ -153,13 +147,12 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase { */ public void testIgnoreAccentsButNotCase() throws IOException { Settings settings = Settings.settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("index.analysis.filter.myCollator.type", "icu_collation") .put("index.analysis.filter.myCollator.language", "en") .put("index.analysis.filter.myCollator.strength", "primary") .put("index.analysis.filter.myCollator.caseLevel", "true") .build(); - AnalysisService analysisService = createAnalysisService(settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin()::onModule); TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator"); assertCollatesToSame(filterFactory, "résumé", "resume"); @@ -174,13 +167,12 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase { */ public void testUpperCaseFirst() throws IOException { Settings settings = Settings.settingsBuilder() - 
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("index.analysis.filter.myCollator.type", "icu_collation") .put("index.analysis.filter.myCollator.language", "en") .put("index.analysis.filter.myCollator.strength", "tertiary") .put("index.analysis.filter.myCollator.caseFirst", "upper") .build(); - AnalysisService analysisService = createAnalysisService(settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin()::onModule); TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator"); assertCollation(filterFactory, "Resume", "resume", -1); @@ -204,12 +196,11 @@ public class SimpleIcuCollationTokenFilterTests extends ESTestCase { String tailoredRules = tailoredCollator.getRules(); Settings settings = Settings.settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("index.analysis.filter.myCollator.type", "icu_collation") .put("index.analysis.filter.myCollator.rules", tailoredRules) .put("index.analysis.filter.myCollator.strength", "primary") .build(); - AnalysisService analysisService = createAnalysisService(settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin()::onModule); TokenFilterFactory filterFactory = analysisService.tokenFilter("myCollator"); assertCollatesToSame(filterFactory, "Töne", "Toene"); diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuNormalizerCharFilterTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuNormalizerCharFilterTests.java index 7ebb783d1db..749b04b2260 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuNormalizerCharFilterTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/SimpleIcuNormalizerCharFilterTests.java @@ -22,12 +22,12 @@ package org.elasticsearch.index.analysis; import com.ibm.icu.text.Normalizer2; import org.apache.lucene.analysis.CharFilter; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; +import org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin; import org.elasticsearch.test.ESTestCase; import java.io.StringReader; -import static org.elasticsearch.index.analysis.AnalysisTestUtils.createAnalysisService; /** * Test @@ -35,10 +35,9 @@ import static org.elasticsearch.index.analysis.AnalysisTestUtils.createAnalysisS public class SimpleIcuNormalizerCharFilterTests extends ESTestCase { public void testDefaultSetting() throws Exception { Settings settings = Settings.settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("index.analysis.char_filter.myNormalizerChar.type", "icu_normalizer") .build(); - AnalysisService analysisService = createAnalysisService(settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin()::onModule); CharFilterFactory charFilterFactory = analysisService.charFilter("myNormalizerChar"); String input = "ʰ㌰゙5℃№㈱㌘,バッファーの正規化のテスト.㋐㋑㋒㋓㋔カキクケコザジズゼゾg̈각/각நிเกषिchkʷक्षि"; @@ -58,12 +57,11 @@ public class SimpleIcuNormalizerCharFilterTests extends ESTestCase { public void testNameAndModeSetting() throws Exception { Settings settings = Settings.settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("index.analysis.char_filter.myNormalizerChar.type", "icu_normalizer") 
.put("index.analysis.char_filter.myNormalizerChar.name", "nfkc") .put("index.analysis.char_filter.myNormalizerChar.mode", "decompose") .build(); - AnalysisService analysisService = createAnalysisService(settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, new AnalysisICUPlugin()::onModule); CharFilterFactory charFilterFactory = analysisService.charFilter("myNormalizerChar"); String input = "ʰ㌰゙5℃№㈱㌘,バッファーの正規化のテスト.㋐㋑㋒㋓㋔カキクケコザジズゼゾg̈각/각நிเกषिchkʷक्षि"; diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.5.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.5.0.jar.sha1 deleted file mode 100644 index 832db46564e..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e9d68dd5d9fae3349b81de5952d0ee8115c696a4 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.0.0-snapshot-f0aa4fc.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..7bf3eb5333d --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +49acd38e206d9c2fe28269fcba9b752d3b605e0e \ No newline at end of file diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java index 8aa8ff3c1dd..21d9b804055 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java @@ -23,7 +23,6 @@ import org.apache.lucene.analysis.ja.JapaneseAnalyzer; import org.apache.lucene.analysis.ja.JapaneseTokenizer; import org.apache.lucene.analysis.ja.dict.UserDictionary; import org.apache.lucene.analysis.util.CharArraySet; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -36,7 +35,6 @@ public class KuromojiAnalyzerProvider extends AbstractIndexAnalyzerProvider<JapaneseAnalyzer> { private final JapaneseAnalyzer analyzer; - @Inject public KuromojiAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); final Set<?> stopWords = Analysis.parseStopWords(env, settings, JapaneseAnalyzer.getDefaultStopSet()); diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiBaseFormFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiBaseFormFilterFactory.java index e191d78198f..aa035d9edfd 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiBaseFormFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiBaseFormFilterFactory.java @@ -21,14 +21,12 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ja.JapaneseBaseFormFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; public class KuromojiBaseFormFilterFactory extends AbstractTokenFilterFactory { - @Inject public KuromojiBaseFormFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings);
} diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiKatakanaStemmerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiKatakanaStemmerFactory.java index ebebdcb6bba..491f48e34c1 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiKatakanaStemmerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiKatakanaStemmerFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ja.JapaneseKatakanaStemFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -30,7 +29,6 @@ public class KuromojiKatakanaStemmerFactory extends AbstractTokenFilterFactory { private final int minimumLength; - @Inject public KuromojiKatakanaStemmerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); minimumLength = settings.getAsInt("minimum_length", JapaneseKatakanaStemFilter.DEFAULT_MINIMUM_LENGTH); diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiReadingFormFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiReadingFormFilterFactory.java index 59d1088fd1b..d0eb0cecdb9 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiReadingFormFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiReadingFormFilterFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ja.JapaneseReadingFormFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -30,7 +29,6 @@ public class KuromojiReadingFormFilterFactory extends AbstractTokenFilterFactory private final boolean useRomaji; - @Inject public KuromojiReadingFormFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); useRomaji = settings.getAsBoolean("use_romaji", false); diff --git a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java index 0b3f026b010..b81de20d73d 100644 --- a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java +++ b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java @@ -198,18 +198,20 @@ public class KuromojiAnalysisTests extends ESTestCase { String json = "/org/elasticsearch/index/analysis/kuromoji_analysis.json"; Settings settings = Settings.settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), home) .loadFromStream(json, getClass().getResourceAsStream(json)) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .build(); - final SettingsModule settingsModule = new SettingsModule(settings); + Settings nodeSettings = Settings.settingsBuilder() + .put(Environment.PATH_HOME_SETTING.getKey(), home).build(); + final 
SettingsModule settingsModule = new SettingsModule(nodeSettings); settingsModule.registerSetting(InternalSettingsPlugin.VERSION_CREATED); Index index = new Index("test", "_na_"); - AnalysisModule analysisModule = new AnalysisModule(new Environment(settings)); + Environment environment = new Environment(nodeSettings); + AnalysisModule analysisModule = new AnalysisModule(environment); new AnalysisKuromojiPlugin().onModule(analysisModule); Injector parentInjector = new ModulesBuilder().add(settingsModule, - new EnvironmentModule(new Environment(settings)), analysisModule) + new EnvironmentModule(environment), analysisModule) .createInjector(); return parentInjector.getInstance(AnalysisRegistry.class).build(IndexSettingsModule.newIndexSettings(index, settings)); diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.5.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.5.0.jar.sha1 deleted file mode 100644 index 3436526863d..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c4735c43440ebcb20f2b6f49f508fedc12f5366c \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.0.0-snapshot-f0aa4fc.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..8f08fe26980 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +7c11723d7d4dc3b1c9bf80089cfc2de7bc8a2b6e \ No newline at end of file diff --git a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java index e33f1f1e7e2..75da19c0a3c 100644 --- a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java +++ b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java @@ -38,7 +38,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.phonetic.BeiderMorseFilter; import org.apache.lucene.analysis.phonetic.DoubleMetaphoneFilter; import org.apache.lucene.analysis.phonetic.PhoneticFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -58,7 +57,6 @@ public class PhoneticTokenFilterFactory extends AbstractTokenFilterFactory { private NameType nametype; private RuleType ruletype; - @Inject public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.languageset = null; diff --git a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java index f3d1d12f45a..688394b6844 100644 --- a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java +++ b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java @@ -21,18 +21,10 @@ package org.elasticsearch.index.analysis; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import 
org.elasticsearch.common.inject.Injector; -import org.elasticsearch.common.inject.ModulesBuilder; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.EnvironmentModule; import org.elasticsearch.index.Index; -import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.plugin.analysis.AnalysisPhoneticPlugin; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.InternalSettingsPlugin; import org.hamcrest.MatcherAssert; import java.io.IOException; @@ -47,22 +39,10 @@ public class SimplePhoneticAnalysisTests extends ESTestCase { String yaml = "/org/elasticsearch/index/analysis/phonetic-1.yml"; Settings settings = settingsBuilder().loadFromStream(yaml, getClass().getResourceAsStream(yaml)) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .build(); - AnalysisService analysisService = testSimpleConfiguration(settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), settings, + new AnalysisPhoneticPlugin()::onModule); TokenFilterFactory filterFactory = analysisService.tokenFilter("phonetic"); MatcherAssert.assertThat(filterFactory, instanceOf(PhoneticTokenFilterFactory.class)); } - - private AnalysisService testSimpleConfiguration(Settings settings) throws IOException { - Index index = new Index("test", "_na_"); - AnalysisModule analysisModule = new AnalysisModule(new Environment(settings)); - new AnalysisPhoneticPlugin().onModule(analysisModule); - SettingsModule settingsModule = new SettingsModule(settings); - settingsModule.registerSetting(InternalSettingsPlugin.VERSION_CREATED); - Injector parentInjector = new ModulesBuilder().add(settingsModule, - new EnvironmentModule(new Environment(settings)), analysisModule) - .createInjector(); - return parentInjector.getInstance(AnalysisRegistry.class).build(IndexSettingsModule.newIndexSettings(index, settings)); - } } diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.5.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.5.0.jar.sha1 deleted file mode 100644 index 95b85f7edbd..00000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a31a4d1476d45738a460374d9801dc5ed9b49c1a \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.0.0-snapshot-f0aa4fc.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..bf5e5da8dcf --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +654d961bd4975a3cb13388d86d72fefb6994f659 \ No newline at end of file diff --git a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java index 22fcf238725..591912b8fa3 100644 --- a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java +++ b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.analysis; import 
org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -31,7 +30,6 @@ public class SmartChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider<SmartChineseAnalyzer> { private final SmartChineseAnalyzer analyzer; - @Inject public SmartChineseAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); diff --git a/plugins/analysis-smartcn/src/test/java/org/elasticsearch/index/analysis/SimpleSmartChineseAnalysisTests.java b/plugins/analysis-smartcn/src/test/java/org/elasticsearch/index/analysis/SimpleSmartChineseAnalysisTests.java index 76761a67c9f..0fcc42643d4 100644 --- a/plugins/analysis-smartcn/src/test/java/org/elasticsearch/index/analysis/SimpleSmartChineseAnalysisTests.java +++ b/plugins/analysis-smartcn/src/test/java/org/elasticsearch/index/analysis/SimpleSmartChineseAnalysisTests.java @@ -19,44 +19,21 @@ package org.elasticsearch.index.analysis; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.inject.Injector; -import org.elasticsearch.common.inject.ModulesBuilder; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.EnvironmentModule; import org.elasticsearch.index.Index; -import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.plugin.analysis.smartcn.AnalysisSmartChinesePlugin; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.InternalSettingsPlugin; import org.hamcrest.MatcherAssert; import java.io.IOException; -import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.hamcrest.Matchers.instanceOf; /** */ public class SimpleSmartChineseAnalysisTests extends ESTestCase { public void testDefaultsIcuAnalysis() throws IOException { - Index index = new Index("test", "_na_"); - Settings settings = settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build(); - AnalysisModule analysisModule = new AnalysisModule(new Environment(settings)); - new AnalysisSmartChinesePlugin().onModule(analysisModule); - SettingsModule settingsModule = new SettingsModule(settings); - settingsModule.registerSetting(InternalSettingsPlugin.VERSION_CREATED); - Injector parentInjector = new ModulesBuilder().add(settingsModule, - new EnvironmentModule(new Environment(settings)), analysisModule) - .createInjector(); - final AnalysisService analysisService = parentInjector.getInstance(AnalysisRegistry.class).build(IndexSettingsModule.newIndexSettings(index, settings)); + final AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), Settings.EMPTY, new AnalysisSmartChinesePlugin()::onModule); TokenizerFactory tokenizerFactory = analysisService.tokenizer("smartcn_tokenizer"); MatcherAssert.assertThat(tokenizerFactory, instanceOf(SmartChineseTokenizerTokenizerFactory.class)); } diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.5.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.5.0.jar.sha1 deleted file mode 100644 index d5a28231e65..00000000000 ---
a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a7505d011aca54c004d0fc86a490d5f054bb903 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.0.0-snapshot-f0aa4fc.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.0.0-snapshot-f0aa4fc.jar.sha1 new file mode 100644 index 00000000000..ed0dc51b97c --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.0.0-snapshot-f0aa4fc.jar.sha1 @@ -0,0 +1 @@ +0f408ac498782617a0f80d6a295d82f6d3609499 \ No newline at end of file diff --git a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/PolishAnalysisTests.java b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/PolishAnalysisTests.java index 8f76c908e4b..9bfcc2c2f3f 100644 --- a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/PolishAnalysisTests.java +++ b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/PolishAnalysisTests.java @@ -21,46 +21,22 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.pl.PolishAnalyzer; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.inject.Injector; -import org.elasticsearch.common.inject.ModulesBuilder; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.EnvironmentModule; +import org.elasticsearch.index.Index; import org.elasticsearch.index.analysis.pl.PolishStemTokenFilterFactory; -import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.plugin.analysis.stempel.AnalysisStempelPlugin; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.InternalSettingsPlugin; import org.hamcrest.MatcherAssert; import java.io.IOException; -import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.hamcrest.Matchers.instanceOf; /** */ public class PolishAnalysisTests extends ESTestCase { public void testDefaultsPolishAnalysis() throws IOException { - Settings settings = settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build(); - - - AnalysisModule analysisModule = new AnalysisModule(new Environment(settings)); - new AnalysisStempelPlugin().onModule(analysisModule); - SettingsModule settingsModule = new SettingsModule(settings); - settingsModule.registerSetting(InternalSettingsPlugin.VERSION_CREATED); - Injector parentInjector = new ModulesBuilder().add(settingsModule, - new EnvironmentModule(new Environment(settings)), analysisModule) - .createInjector(); - - final AnalysisService analysisService = parentInjector.getInstance(AnalysisRegistry.class).build(IndexSettingsModule.newIndexSettings("test", settings)); + final AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), Settings.EMPTY, new AnalysisStempelPlugin()::onModule); TokenFilterFactory tokenizerFactory = analysisService.tokenFilter("polish_stem"); MatcherAssert.assertThat(tokenizerFactory, instanceOf(PolishStemTokenFilterFactory.class)); diff --git a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/SimplePolishTokenFilterTests.java 
b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/SimplePolishTokenFilterTests.java index 890f4eceec1..193cfea6811 100644 --- a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/SimplePolishTokenFilterTests.java +++ b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/SimplePolishTokenFilterTests.java @@ -24,20 +24,10 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.inject.Injector; -import org.elasticsearch.common.inject.ModulesBuilder; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.EnvironmentModule; import org.elasticsearch.index.Index; -import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.plugin.analysis.stempel.AnalysisStempelPlugin; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; import java.io.StringReader; @@ -57,11 +47,9 @@ public class SimplePolishTokenFilterTests extends ESTestCase { private void testToken(String source, String expected) throws IOException { Index index = new Index("test", "_na_"); Settings settings = Settings.settingsBuilder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("index.analysis.filter.myStemmer.type", "polish_stem") .build(); - AnalysisService analysisService = createAnalysisService(index, settings); + AnalysisService analysisService = createAnalysisService(index, settings, new AnalysisStempelPlugin()::onModule); TokenFilterFactory filterFactory = analysisService.tokenFilter("myStemmer"); @@ -77,12 +65,8 @@ public class SimplePolishTokenFilterTests extends ESTestCase { } private void testAnalyzer(String source, String... 
expected_terms) throws IOException { - Index index = new Index("test", "_na_"); - Settings settings = Settings.settingsBuilder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - AnalysisService analysisService = createAnalysisService(index, settings); + AnalysisService analysisService = createAnalysisService(new Index("test", "_na_"), Settings.EMPTY, + new AnalysisStempelPlugin()::onModule); Analyzer analyzer = analysisService.analyzer("polish").analyzer(); @@ -97,14 +81,4 @@ public class SimplePolishTokenFilterTests extends ESTestCase { } } - private AnalysisService createAnalysisService(Index index, Settings settings) throws IOException { - AnalysisModule analysisModule = new AnalysisModule(new Environment(settings)); - new AnalysisStempelPlugin().onModule(analysisModule); - SettingsModule settingsModule = new SettingsModule(settings); - settingsModule.registerSetting(InternalSettingsPlugin.VERSION_CREATED); - Injector parentInjector = new ModulesBuilder().add(settingsModule, - new EnvironmentModule(new Environment(settings)), analysisModule) - .createInjector(); - return parentInjector.getInstance(AnalysisRegistry.class).build(IndexSettingsModule.newIndexSettings(index, settings)); - } } diff --git a/plugins/delete-by-query/src/test/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryTests.java b/plugins/delete-by-query/src/test/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryTests.java index 232b056535c..3c595b1ab16 100644 --- a/plugins/delete-by-query/src/test/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryTests.java +++ b/plugins/delete-by-query/src/test/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryTests.java @@ -276,7 +276,7 @@ public class DeleteByQueryTests extends ESIntegTestCase { assertSearchContextsClosed(); } - public void testConcurrentDeleteByQueriesOnDifferentDocs() throws Exception { + public void testConcurrentDeleteByQueriesOnDifferentDocs() throws Throwable { createIndex("test"); ensureGreen(); @@ -324,18 +324,17 @@ public class DeleteByQueryTests extends ESIntegTestCase { Throwable assertionError = exceptionHolder.get(); if (assertionError != null) { - assertionError.printStackTrace(); + throw assertionError; } - assertThat(assertionError + " should be null", assertionError, nullValue()); - refresh(); + refresh(); for (int i = 0; i < threads.length; i++) { assertHitCount(client().prepareSearch("test").setSize(0).setQuery(QueryBuilders.termQuery("field", i)).get(), 0); } assertSearchContextsClosed(); } - public void testConcurrentDeleteByQueriesOnSameDocs() throws Exception { + public void testConcurrentDeleteByQueriesOnSameDocs() throws Throwable { assertAcked(prepareCreate("test").setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1))); ensureGreen(); @@ -386,9 +385,8 @@ public class DeleteByQueryTests extends ESIntegTestCase { Throwable assertionError = exceptionHolder.get(); if (assertionError != null) { - assertionError.printStackTrace(); + throw assertionError; } - assertThat(assertionError + " should be null", assertionError, nullValue()); assertHitCount(client().prepareSearch("test").setSize(0).get(), 0L); assertThat(deleted.get(), equalTo(docs)); assertSearchContextsClosed(); @@ -445,4 +443,4 @@ public class DeleteByQueryTests extends ESIntegTestCase { } }); } -} \ No newline at end of file +} diff --git a/plugins/discovery-azure/build.gradle b/plugins/discovery-azure/build.gradle index ec4ef7cb625..1dd2aa26f23 
diff --git a/plugins/discovery-azure/build.gradle b/plugins/discovery-azure/build.gradle index ec4ef7cb625..1dd2aa26f23 100644 --- a/plugins/discovery-azure/build.gradle +++ b/plugins/discovery-azure/build.gradle @@ -23,7 +23,7 @@ esplugin { } versions << [ - 'azure': '0.9.0', + 'azure': '0.9.3', 'jersey': '1.13' ] diff --git a/plugins/discovery-azure/licenses/azure-core-0.9.0.jar.sha1 b/plugins/discovery-azure/licenses/azure-core-0.9.0.jar.sha1 deleted file mode 100644 index f9696307afe..00000000000 --- a/plugins/discovery-azure/licenses/azure-core-0.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -050719f91deceed1be1aaf87e85099a861295fa2 \ No newline at end of file diff --git a/plugins/discovery-azure/licenses/azure-core-0.9.3.jar.sha1 b/plugins/discovery-azure/licenses/azure-core-0.9.3.jar.sha1 new file mode 100644 index 00000000000..5947972663e --- /dev/null +++ b/plugins/discovery-azure/licenses/azure-core-0.9.3.jar.sha1 @@ -0,0 +1 @@ +7fe32241b738aad0f700f4277fa998230c144ae7 \ No newline at end of file diff --git a/plugins/discovery-azure/licenses/azure-svc-mgmt-compute-0.9.0.jar.sha1 b/plugins/discovery-azure/licenses/azure-svc-mgmt-compute-0.9.0.jar.sha1 deleted file mode 100644 index c971d7c5724..00000000000 --- a/plugins/discovery-azure/licenses/azure-svc-mgmt-compute-0.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -887ca8ee5564e8ba2351e6b5db2a1293a8d04674 \ No newline at end of file diff --git a/plugins/discovery-azure/licenses/azure-svc-mgmt-compute-0.9.3.jar.sha1 b/plugins/discovery-azure/licenses/azure-svc-mgmt-compute-0.9.3.jar.sha1 new file mode 100644 index 00000000000..d427170d578 --- /dev/null +++ b/plugins/discovery-azure/licenses/azure-svc-mgmt-compute-0.9.3.jar.sha1 @@ -0,0 +1 @@ +602d3e6f5a9f058c2439e8fdf1270cddc811b440 \ No newline at end of file diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java index 0c665c138b8..acc1e76bde4 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java +++ b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeService.java @@ -22,26 +22,36 @@ package org.elasticsearch.cloud.azure.management; import com.microsoft.windowsazure.core.utils.KeyStoreType; import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.azure.AzureUnicastHostsProvider; public interface AzureComputeService { final class Management { - public static final Setting<String> SUBSCRIPTION_ID_SETTING = Setting.simpleString("cloud.azure.management.subscription.id", false, Setting.Scope.CLUSTER); - public static final Setting<String> SERVICE_NAME_SETTING = Setting.simpleString("cloud.azure.management.cloud.service.name", false, Setting.Scope.CLUSTER); + public static final Setting<String> SUBSCRIPTION_ID_SETTING = + Setting.simpleString("cloud.azure.management.subscription.id", Property.NodeScope, Property.Filtered); + public static final Setting<String> SERVICE_NAME_SETTING = + Setting.simpleString("cloud.azure.management.cloud.service.name", Property.NodeScope); // Keystore settings - public static final Setting<String> KEYSTORE_PATH_SETTING = Setting.simpleString("cloud.azure.management.keystore.path", false, Setting.Scope.CLUSTER); - public static final Setting<String> KEYSTORE_PASSWORD_SETTING = Setting.simpleString("cloud.azure.management.keystore.password", false, Setting.Scope.CLUSTER); - public static final Setting<KeyStoreType> KEYSTORE_TYPE_SETTING = new Setting<>("cloud.azure.management.keystore.type", KeyStoreType.pkcs12.name(), KeyStoreType::fromString, false, Setting.Scope.CLUSTER); + public static final Setting<String> KEYSTORE_PATH_SETTING = + Setting.simpleString("cloud.azure.management.keystore.path", Property.NodeScope, Property.Filtered); + public static final Setting<String> KEYSTORE_PASSWORD_SETTING = + Setting.simpleString("cloud.azure.management.keystore.password", Property.NodeScope, + Property.Filtered); + public static final Setting<KeyStoreType> KEYSTORE_TYPE_SETTING = + new Setting<>("cloud.azure.management.keystore.type", KeyStoreType.pkcs12.name(), KeyStoreType::fromString, + Property.NodeScope, Property.Filtered); } final class Discovery { - public static final Setting<TimeValue> REFRESH_SETTING = Setting.positiveTimeSetting("discovery.azure.refresh_interval", TimeValue.timeValueSeconds(0), false, Setting.Scope.CLUSTER); + public static final Setting<TimeValue> REFRESH_SETTING = + Setting.positiveTimeSetting("discovery.azure.refresh_interval", TimeValue.timeValueSeconds(0), Property.NodeScope); - public static final Setting<AzureUnicastHostsProvider.HostType> HOST_TYPE_SETTING = new Setting<>("discovery.azure.host.type", - AzureUnicastHostsProvider.HostType.PRIVATE_IP.name(), AzureUnicastHostsProvider.HostType::fromString, false, Setting.Scope.CLUSTER); + public static final Setting<AzureUnicastHostsProvider.HostType> HOST_TYPE_SETTING = + new Setting<>("discovery.azure.host.type", AzureUnicastHostsProvider.HostType.PRIVATE_IP.name(), + AzureUnicastHostsProvider.HostType::fromString, Property.NodeScope); public static final String ENDPOINT_NAME = "discovery.azure.endpoint.name"; public static final String DEPLOYMENT_NAME = "discovery.azure.deployment.name"; diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/plugin/discovery/azure/AzureDiscoveryPlugin.java b/plugins/discovery-azure/src/main/java/org/elasticsearch/plugin/discovery/azure/AzureDiscoveryPlugin.java index b2b94b6c3bb..e5de7a0cb02 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/plugin/discovery/azure/AzureDiscoveryPlugin.java +++ b/plugins/discovery-azure/src/main/java/org/elasticsearch/plugin/discovery/azure/AzureDiscoveryPlugin.java @@ -75,10 +75,5 @@ public class AzureDiscoveryPlugin extends Plugin { settingsModule.registerSetting(AzureComputeService.Management.SUBSCRIPTION_ID_SETTING); settingsModule.registerSetting(AzureComputeService.Management.SERVICE_NAME_SETTING); settingsModule.registerSetting(AzureComputeService.Discovery.HOST_TYPE_SETTING); - // Cloud management API settings we need to hide - settingsModule.registerSettingsFilter(AzureComputeService.Management.KEYSTORE_PATH_SETTING.getKey()); - settingsModule.registerSettingsFilter(AzureComputeService.Management.KEYSTORE_PASSWORD_SETTING.getKey()); - settingsModule.registerSettingsFilter(AzureComputeService.Management.KEYSTORE_TYPE_SETTING.getKey()); - settingsModule.registerSettingsFilter(AzureComputeService.Management.SUBSCRIPTION_ID_SETTING.getKey()); } }
diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java index a90d3573468..8cfe6c43108 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java @@ -22,6 +22,7 @@ package org.elasticsearch.cloud.aws; import com.amazonaws.Protocol; import com.amazonaws.services.ec2.AmazonEC2; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -32,7 +33,7 @@ import java.util.Locale; import java.util.function.Function; public interface AwsEc2Service { - Setting<Boolean> AUTO_ATTRIBUTE_SETTING = Setting.boolSetting("cloud.node.auto_attributes", false, false, Setting.Scope.CLUSTER); + Setting<Boolean> AUTO_ATTRIBUTE_SETTING = Setting.boolSetting("cloud.node.auto_attributes", false, Property.NodeScope); // Global AWS settings (shared between discovery-ec2 and repository-s3) // Each setting starting with `cloud.aws` also exists in repository-s3 project. Don't forget to update @@ -40,40 +41,44 @@ public interface AwsEc2Service { /** * cloud.aws.access_key: AWS Access key. Shared with repository-s3 plugin */ - Setting<String> KEY_SETTING = Setting.simpleString("cloud.aws.access_key", false, Setting.Scope.CLUSTER); + Setting<String> KEY_SETTING = + Setting.simpleString("cloud.aws.access_key", Property.NodeScope, Property.Filtered); /** * cloud.aws.secret_key: AWS Secret key. Shared with repository-s3 plugin */ - Setting<String> SECRET_SETTING = Setting.simpleString("cloud.aws.secret_key", false, Setting.Scope.CLUSTER); + Setting<String> SECRET_SETTING = + Setting.simpleString("cloud.aws.secret_key", Property.NodeScope, Property.Filtered); /** * cloud.aws.protocol: Protocol for AWS API: http or https. Defaults to https. Shared with repository-s3 plugin */ Setting<Protocol> PROTOCOL_SETTING = new Setting<>("cloud.aws.protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), - false, Setting.Scope.CLUSTER); + Property.NodeScope); /** * cloud.aws.proxy.host: In case of proxy, define its hostname/IP. Shared with repository-s3 plugin */ - Setting<String> PROXY_HOST_SETTING = Setting.simpleString("cloud.aws.proxy.host", false, Setting.Scope.CLUSTER); + Setting<String> PROXY_HOST_SETTING = Setting.simpleString("cloud.aws.proxy.host", Property.NodeScope); /** * cloud.aws.proxy.port: In case of proxy, define its port. Defaults to 80. Shared with repository-s3 plugin */ - Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("cloud.aws.proxy.port", 80, 0, 1<<16, false, Setting.Scope.CLUSTER); + Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("cloud.aws.proxy.port", 80, 0, 1<<16, Property.NodeScope); /** * cloud.aws.proxy.username: In case of proxy with auth, define the username. Shared with repository-s3 plugin */ - Setting<String> PROXY_USERNAME_SETTING = Setting.simpleString("cloud.aws.proxy.username", false, Setting.Scope.CLUSTER); + Setting<String> PROXY_USERNAME_SETTING = Setting.simpleString("cloud.aws.proxy.username", Property.NodeScope); /** * cloud.aws.proxy.password: In case of proxy with auth, define the password. Shared with repository-s3 plugin */ - Setting<String> PROXY_PASSWORD_SETTING = Setting.simpleString("cloud.aws.proxy.password", false, Setting.Scope.CLUSTER); + Setting<String> PROXY_PASSWORD_SETTING = + Setting.simpleString("cloud.aws.proxy.password", Property.NodeScope, Property.Filtered); /** * cloud.aws.signer: If you are using an old AWS API version, you can define a Signer. Shared with repository-s3 plugin */ - Setting<String> SIGNER_SETTING = Setting.simpleString("cloud.aws.signer", false, Setting.Scope.CLUSTER); + Setting<String> SIGNER_SETTING = Setting.simpleString("cloud.aws.signer", Property.NodeScope); /** * cloud.aws.region: Region. Shared with repository-s3 plugin */ - Setting<String> REGION_SETTING = new Setting<>("cloud.aws.region", "", s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + Setting<String> REGION_SETTING = + new Setting<>("cloud.aws.region", "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); /** * Defines specific ec2 settings starting with cloud.aws.ec2. @@ -83,63 +88,63 @@ public interface AwsEc2Service { * cloud.aws.ec2.access_key: AWS Access key specific for EC2 API calls. Defaults to cloud.aws.access_key. * @see AwsEc2Service#KEY_SETTING */ - Setting<String> KEY_SETTING = new Setting<>("cloud.aws.ec2.access_key", AwsEc2Service.KEY_SETTING, Function.identity(), false, - Setting.Scope.CLUSTER); + Setting<String> KEY_SETTING = new Setting<>("cloud.aws.ec2.access_key", AwsEc2Service.KEY_SETTING, Function.identity(), + Property.NodeScope, Property.Filtered); /** * cloud.aws.ec2.secret_key: AWS Secret key specific for EC2 API calls. Defaults to cloud.aws.secret_key. * @see AwsEc2Service#SECRET_SETTING */ - Setting<String> SECRET_SETTING = new Setting<>("cloud.aws.ec2.secret_key", AwsEc2Service.SECRET_SETTING, Function.identity(), false, - Setting.Scope.CLUSTER); + Setting<String> SECRET_SETTING = new Setting<>("cloud.aws.ec2.secret_key", AwsEc2Service.SECRET_SETTING, Function.identity(), + Property.NodeScope, Property.Filtered); /** * cloud.aws.ec2.protocol: Protocol for AWS API specific for EC2 API calls: http or https. Defaults to cloud.aws.protocol. * @see AwsEc2Service#PROTOCOL_SETTING */ Setting<Protocol> PROTOCOL_SETTING = new Setting<>("cloud.aws.ec2.protocol", AwsEc2Service.PROTOCOL_SETTING, - s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), false, Setting.Scope.CLUSTER); + s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope); /** * cloud.aws.ec2.proxy.host: In case of proxy, define its hostname/IP specific for EC2 API calls. Defaults to cloud.aws.proxy.host. * @see AwsEc2Service#PROXY_HOST_SETTING */ Setting<String> PROXY_HOST_SETTING = new Setting<>("cloud.aws.ec2.proxy.host", AwsEc2Service.PROXY_HOST_SETTING, - Function.identity(), false, Setting.Scope.CLUSTER); + Function.identity(), Property.NodeScope); /** * cloud.aws.ec2.proxy.port: In case of proxy, define its port specific for EC2 API calls. Defaults to cloud.aws.proxy.port. * @see AwsEc2Service#PROXY_PORT_SETTING */ Setting<Integer> PROXY_PORT_SETTING = new Setting<>("cloud.aws.ec2.proxy.port", AwsEc2Service.PROXY_PORT_SETTING, - s -> Setting.parseInt(s, 0, 1<<16, "cloud.aws.ec2.proxy.port"), false, Setting.Scope.CLUSTER); + s -> Setting.parseInt(s, 0, 1<<16, "cloud.aws.ec2.proxy.port"), Property.NodeScope); /** * cloud.aws.ec2.proxy.username: In case of proxy with auth, define the username specific for EC2 API calls. * Defaults to cloud.aws.proxy.username. * @see AwsEc2Service#PROXY_USERNAME_SETTING */ Setting<String> PROXY_USERNAME_SETTING = new Setting<>("cloud.aws.ec2.proxy.username", AwsEc2Service.PROXY_USERNAME_SETTING, - Function.identity(), false, Setting.Scope.CLUSTER); + Function.identity(), Property.NodeScope); /** * cloud.aws.ec2.proxy.password: In case of proxy with auth, define the password specific for EC2 API calls. * Defaults to cloud.aws.proxy.password. * @see AwsEc2Service#PROXY_PASSWORD_SETTING */ Setting<String> PROXY_PASSWORD_SETTING = new Setting<>("cloud.aws.ec2.proxy.password", AwsEc2Service.PROXY_PASSWORD_SETTING, - Function.identity(), false, Setting.Scope.CLUSTER); + Function.identity(), Property.NodeScope, Property.Filtered); /** * cloud.aws.ec2.signer: If you are using an old AWS API version, you can define a Signer. Specific for EC2 API calls.
* Defaults to cloud.aws.signer. * @see AwsEc2Service#SIGNER_SETTING */ Setting<String> SIGNER_SETTING = new Setting<>("cloud.aws.ec2.signer", AwsEc2Service.SIGNER_SETTING, Function.identity(), - false, Setting.Scope.CLUSTER); + Property.NodeScope); /** * cloud.aws.ec2.region: Region specific for EC2 API calls. Defaults to cloud.aws.region. * @see AwsEc2Service#REGION_SETTING */ Setting<String> REGION_SETTING = new Setting<>("cloud.aws.ec2.region", AwsEc2Service.REGION_SETTING, - s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); /** * cloud.aws.ec2.endpoint: Endpoint. If not set, endpoint will be guessed based on region setting. */ - Setting<String> ENDPOINT_SETTING = Setting.simpleString("cloud.aws.ec2.endpoint", false, Setting.Scope.CLUSTER); + Setting<String> ENDPOINT_SETTING = Setting.simpleString("cloud.aws.ec2.endpoint", Property.NodeScope); } /** @@ -158,32 +163,32 @@ public interface AwsEc2Service { * Can be one of private_ip, public_ip, private_dns, public_dns. Defaults to private_ip. */ Setting<HostType> HOST_TYPE_SETTING = - new Setting<>("discovery.ec2.host_type", HostType.PRIVATE_IP.name(), s -> HostType.valueOf(s.toUpperCase(Locale.ROOT)), false, - Setting.Scope.CLUSTER); + new Setting<>("discovery.ec2.host_type", HostType.PRIVATE_IP.name(), s -> HostType.valueOf(s.toUpperCase(Locale.ROOT)), + Property.NodeScope); /** * discovery.ec2.any_group: If set to false, will require all security groups to be present for the instance to be used for the * discovery. Defaults to true. */ Setting<Boolean> ANY_GROUP_SETTING = - Setting.boolSetting("discovery.ec2.any_group", true, false, Setting.Scope.CLUSTER); + Setting.boolSetting("discovery.ec2.any_group", true, Property.NodeScope); /** * discovery.ec2.groups: Either a comma separated list or array based list of (security) groups. Only instances with the provided * security groups will be used in the cluster discovery. (NOTE: You could provide either group NAME or group ID.) */ Setting<List<String>> GROUPS_SETTING = - Setting.listSetting("discovery.ec2.groups", new ArrayList<>(), s -> s.toString(), false, Setting.Scope.CLUSTER); + Setting.listSetting("discovery.ec2.groups", new ArrayList<>(), s -> s.toString(), Property.NodeScope); /** * discovery.ec2.availability_zones: Either a comma separated list or array based list of availability zones. Only instances within * the provided availability zones will be used in the cluster discovery. */ Setting<List<String>> AVAILABILITY_ZONES_SETTING = - Setting.listSetting("discovery.ec2.availability_zones", Collections.emptyList(), s -> s.toString(), false, - Setting.Scope.CLUSTER); + Setting.listSetting("discovery.ec2.availability_zones", Collections.emptyList(), s -> s.toString(), + Property.NodeScope); /** * discovery.ec2.node_cache_time: How long the list of hosts is cached to prevent further requests to the AWS API. Defaults to 10s. */ Setting<TimeValue> NODE_CACHE_TIME_SETTING = - Setting.timeSetting("discovery.ec2.node_cache_time", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER); + Setting.timeSetting("discovery.ec2.node_cache_time", TimeValue.timeValueSeconds(10), Property.NodeScope); /** * discovery.ec2.tag.*: The ec2 discovery can filter machines to include in the cluster based on tags (and not just groups). * The settings given by this setting will be used to filter by tag. For instance, setting discovery.ec2.tag.stage to dev will only filter * instances with a tag key set to stage, and a value of dev. Several tags set will require all of those tags to be set for the * instance to be included. */ - Setting<Settings> TAG_SETTING = Setting.groupSetting("discovery.ec2.tag.", false,Setting.Scope.CLUSTER); + Setting<Settings> TAG_SETTING = Setting.groupSetting("discovery.ec2.tag.", Property.NodeScope); } AmazonEC2 client(); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java index 36eae9b5829..a76a2b04a91 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java @@ -55,7 +55,7 @@ public class AwsSigner { try { validateSignerType(signer); } catch (IllegalArgumentException e) { - logger.warn(e.getMessage()); + logger.warn("{}", e.getMessage()); } configuration.setSignerOverride(signer); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java index 12d5682d487..b4c293ec736 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/plugin/discovery/ec2/Ec2DiscoveryPlugin.java @@ -134,14 +134,6 @@ public class Ec2DiscoveryPlugin extends Plugin { settingsModule.registerSetting(AwsEc2Service.DISCOVERY_EC2.GROUPS_SETTING); settingsModule.registerSetting(AwsEc2Service.DISCOVERY_EC2.AVAILABILITY_ZONES_SETTING); settingsModule.registerSetting(AwsEc2Service.DISCOVERY_EC2.NODE_CACHE_TIME_SETTING); - - // Filter global settings - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.KEY_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.SECRET_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.PROXY_PASSWORD_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_EC2.KEY_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_EC2.SECRET_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING.getKey()); } /** diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index 5063d59b40e..1705421207b 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -191,7 +191,7 @@ public class Ec2DiscoveryTests extends ESTestCase { tagsList.add(tags); } - logger.info("started [{}] instances with [{}] stage=prod tag"); + logger.info("started [{}] instances with [{}] stage=prod tag", nodes, prodInstances); List<DiscoveryNode> discoveryNodes = buildDynamicNodes(nodeSettings, nodes, tagsList); assertThat(discoveryNodes, hasSize(prodInstances)); } @@ -222,7 +222,7 @@ public class Ec2DiscoveryTests extends ESTestCase { tagsList.add(tags); } - logger.info("started [{}] instances with [{}] stage=prod tag"); + logger.info("started [{}] instances with [{}] stage=prod tag", nodes, prodInstances); List<DiscoveryNode> discoveryNodes = buildDynamicNodes(nodeSettings, nodes, tagsList); assertThat(discoveryNodes, hasSize(prodInstances)); }
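The CLOUD_EC2 block above repeatedly uses Setting's fallback constructor: the ec2-specific key is read when present, and the corresponding global cloud.aws.* setting supplies the default. A minimal sketch of that pattern, lifted from the declarations shown above:

import java.util.function.Function;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

public class FallbackSettingSketch {
    public static final Setting<String> GLOBAL_KEY =
        Setting.simpleString("cloud.aws.access_key", Property.NodeScope, Property.Filtered);

    // Resolves cloud.aws.ec2.access_key when it is set, otherwise falls back
    // to the value of GLOBAL_KEY; both declarations stay filtered.
    public static final Setting<String> EC2_KEY =
        new Setting<>("cloud.aws.ec2.access_key", GLOBAL_KEY, Function.identity(),
            Property.NodeScope, Property.Filtered);
}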
diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java index ce5154b3436..a6faa390e5d 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeService.java @@ -22,12 +22,14 @@ package org.elasticsearch.cloud.gce; import com.google.api.services.compute.model.Instance; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.function.Function; public interface GceComputeService extends LifecycleComponent<GceComputeService> { @@ -41,25 +43,25 @@ public interface GceComputeService extends LifecycleComponent<GceComputeService> /** * cloud.gce.project_id: Google project id */ - Setting<String> PROJECT_SETTING = Setting.simpleString("cloud.gce.project_id", false, Setting.Scope.CLUSTER); + Setting<String> PROJECT_SETTING = Setting.simpleString("cloud.gce.project_id", Property.NodeScope); /** * cloud.gce.zone: Google Compute Engine zones */ Setting<List<String>> ZONE_SETTING = - Setting.listSetting("cloud.gce.zone", Collections.emptyList(), s -> s, false, Setting.Scope.CLUSTER); + Setting.listSetting("cloud.gce.zone", Collections.emptyList(), Function.identity(), Property.NodeScope); /** * cloud.gce.refresh_interval: How long the list of hosts is cached to prevent further requests to the AWS API. 0 disables caching. * A negative value will cause infinite caching. Defaults to 0s. */ Setting<TimeValue> REFRESH_SETTING = - Setting.timeSetting("cloud.gce.refresh_interval", TimeValue.timeValueSeconds(0), false, Setting.Scope.CLUSTER); + Setting.timeSetting("cloud.gce.refresh_interval", TimeValue.timeValueSeconds(0), Property.NodeScope); /** * cloud.gce.retry: Should we retry calling GCE API in case of error? Defaults to true. */ - Setting<Boolean> RETRY_SETTING = Setting.boolSetting("cloud.gce.retry", true, false, Setting.Scope.CLUSTER); + Setting<Boolean> RETRY_SETTING = Setting.boolSetting("cloud.gce.retry", true, Property.NodeScope); /** * cloud.gce.max_wait: How long exponential backoff should retry before definitely failing. @@ -67,7 +69,7 @@ public interface GceComputeService extends LifecycleComponent<GceComputeService> * A negative value will retry indefinitely. Defaults to `-1s` (retry indefinitely). */ Setting<TimeValue> MAX_WAIT_SETTING = - Setting.timeSetting("cloud.gce.max_wait", TimeValue.timeValueSeconds(-1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("cloud.gce.max_wait", TimeValue.timeValueSeconds(-1), Property.NodeScope); /** * Return a collection of running instances within the same GCE project diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java index d9033b602d2..85e0910736f 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceComputeServiceImpl.java @@ -38,13 +38,13 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.gce.RetryHttpInitializerWrapper; import java.io.IOException; import java.net.URL; -import java.nio.file.Files; import java.security.AccessController; import java.security.GeneralSecurityException; import java.security.PrivilegedAction; @@ -61,11 +61,11 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponent<GceComputeService> public static final Setting<Boolean> GCE_VALIDATE_CERTIFICATES = - Setting.boolSetting("cloud.gce.validate_certificates", true, false, Setting.Scope.CLUSTER); + Setting.boolSetting("cloud.gce.validate_certificates", true, Property.NodeScope); public static final Setting<String> GCE_HOST = - new Setting<>("cloud.gce.host", "http://metadata.google.internal", Function.identity(), false, Setting.Scope.CLUSTER); + new Setting<>("cloud.gce.host", "http://metadata.google.internal", Function.identity(), Property.NodeScope); public static final Setting<String> GCE_ROOT_URL = - new Setting<>("cloud.gce.root_url", "https://www.googleapis.com", Function.identity(), false, Setting.Scope.CLUSTER); + new Setting<>("cloud.gce.root_url", "https://www.googleapis.com", Function.identity(), Property.NodeScope); private final String project; private final List<String> zones; diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java index 4e7956c379e..85f3e3a9585 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -43,6 +44,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.function.Function; /** * @@ -53,7 +55,7 @@ public class GceUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { * discovery.gce.tags: The gce discovery can filter machines to include in the cluster based on tags. */ public static final Setting<List<String>> TAGS_SETTING = - Setting.listSetting("discovery.gce.tags", Collections.emptyList(), s -> s, false, Setting.Scope.CLUSTER); + Setting.listSetting("discovery.gce.tags", Collections.emptyList(), Function.identity(), Property.NodeScope); static final class Status { private static final String TERMINATED = "TERMINATED"; diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java index d355276ceae..2b92c4fd8c1 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java @@ -47,7 +47,7 @@ public class GceDiscoveryPlugin extends Plugin { static { /* * GCE's http client changes access levels because its silly and we - * can't allow that on any old stack stack so we pull it here, up front, + * can't allow that on any old stack so we pull it here, up front, * so we can cleanly check the permissions for it. Without this changing * the permission can fail if any part of core is on the stack because * our plugin permissions don't allow core to "reach through" plugins to diff --git a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java index 2ea977b4dd1..27a4cfebbcd 100644 --- a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java +++ b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java @@ -137,6 +137,8 @@ final class TikaImpl { perms.add(new SecurityPermission("putProviderProperty.BC")); perms.add(new SecurityPermission("insertProvider")); perms.add(new ReflectPermission("suppressAccessChecks")); + // xmlbeans, used by POI, needs to get the context classloader + perms.add(new RuntimePermission("getClassLoader")); perms.setReadOnly(); return perms; } diff --git a/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy b/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy index e23e9f4d0cf..adf76991b59 100644 --- a/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy @@ -27,4 +27,6 @@ grant { permission java.security.SecurityPermission "insertProvider"; // TODO: fix POI XWPF to not do this: https://bz.apache.org/bugzilla/show_bug.cgi?id=58597 permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + // needed by xmlbeans, as part of POI for MS xml docs + permission java.lang.RuntimePermission "getClassLoader"; };
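The getClassLoader grant pairs with privileged blocks in the plugin code: under the plugin's restricted policy, looking up the context class loader (as XMLBeans does for POI's OOXML support) only succeeds when the permission is granted and the lookup runs inside doPrivileged. A minimal sketch of that lookup, not the plugin's actual code:

import java.security.AccessController;
import java.security.PrivilegedAction;

public class ContextClassLoaderSketch {
    static ClassLoader contextClassLoader() {
        // Requires RuntimePermission("getClassLoader") when a SecurityManager
        // is installed and the caller's loader is not an ancestor of the
        // context class loader.
        return AccessController.doPrivileged(
            (PrivilegedAction<ClassLoader>) () -> Thread.currentThread().getContextClassLoader());
    }

    public static void main(String[] args) {
        System.out.println(contextClassLoader());
    }
}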
diff --git a/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/10_basic.yaml b/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/10_basic.yaml index ed752971fcb..7c789b9c2ca 100644 --- a/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/10_basic.yaml +++ b/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/10_basic.yaml @@ -1,5 +1,26 @@ "Ingest attachment plugin installed": - do: - cluster.stats: {} + cluster.state: {} + + - set: {master_node: master} + + - do: + nodes.info: {} + + - match: { nodes.$master.plugins.0.name: ingest-attachment } + - match: { nodes.$master.ingest.processors.0.type: append } + - match: { nodes.$master.ingest.processors.1.type: attachment } + - match: { nodes.$master.ingest.processors.2.type: convert } + - match: { nodes.$master.ingest.processors.3.type: date } + - match: { nodes.$master.ingest.processors.4.type: fail } + - match: { nodes.$master.ingest.processors.5.type: foreach } + - match: { nodes.$master.ingest.processors.6.type: gsub } + - match: { nodes.$master.ingest.processors.7.type: join } + - match: { nodes.$master.ingest.processors.8.type: lowercase } + - match: { nodes.$master.ingest.processors.9.type: remove } + - match: { nodes.$master.ingest.processors.10.type: rename } + - match: { nodes.$master.ingest.processors.11.type: set } + - match: { nodes.$master.ingest.processors.12.type: split } + - match: { nodes.$master.ingest.processors.13.type: trim } + - match: { nodes.$master.ingest.processors.14.type: uppercase } - - match: { nodes.plugins.0.name: ingest-attachment } diff --git a/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/30_files_supported.yaml b/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/30_files_supported.yaml new file mode 100644 index 00000000000..4f56603a678 --- /dev/null +++ b/plugins/ingest-attachment/src/test/resources/rest-api-spec/test/ingest_attachment/30_files_supported.yaml @@ -0,0 +1,79 @@ +--- +"Test ingest attachment processor with .doc file": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "attachment" : { + "source_field" : "field1" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline" + body: { field1: "0M8R4KGxGuEAAAAAAAAAAAAAAAAAAAAAPgADAP7/CQAGAAAA..." } [multi-kilobyte base64-encoded .doc test fixture elided]
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7//////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////++vr7/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/vr6+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////8OAAAAFAAAAAAAAAAQAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD+/wAAAwoBAAAAAAAAAAAAAAAAAAAAAAABAAAAAtXN1ZwuGxCTlwgAKyz5rjAAAADUAAAACwAAAAEAAABgAAAABQAAAGgAAAAGAAAAcAAAABEAAAB4AAAAFwAAAIAAAAALAAAAiAAAABAAAACQAAAAEwAAAJgAAAAWAAAAoAAAAA0AAACoAAAADAAAALUAAAACAAAAECcAAAMAAAABAAAAAwAAAAEAAAADAAAAEgAAAAMAAAAAAA8ACwAAAAAAAAALAAAAAAAAAAsAAAAAAAAACwAAAAAAAAAeEAAAAQAAAAEAAAAADBAAAAIAAAAeAAAABgAAAFRpdHJlAAMAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABAAAAAUAAAAGAAAABwAAAP7///8JAAAACgAAAAsAAAAMAAAADQAAAA4AAAAPAAAAEAAAABEAAAASAAAAEwAAABQAAAAVAAAA/v///xcAAA
AYAAAAGQAAABoAAAAbAAAAHAAAAB0AAAAeAAAAHwAAACAAAAAhAAAAIgAAACMAAAAkAAAAJQAAACYAAAAnAAAAKAAAACkAAAAqAAAAKwAAACwAAAAtAAAALgAAAC8AAAAwAAAAMQAAADIAAAAzAAAANAAAADUAAAA2AAAANwAAADgAAAA5AAAAOgAAADsAAAA8AAAAPQAAAD4AAAA/AAAAQAAAAEEAAABCAAAAQwAAAEQAAABFAAAARgAAAEcAAABIAAAASQAAAEoAAABLAAAATAAAAE0AAABOAAAATwAAAFAAAABRAAAAUgAAAFMAAABUAAAAVQAAAFYAAABXAAAAWAAAAFkAAABaAAAAWwAAAFwAAABdAAAAXgAAAF8AAABgAAAAYQAAAGIAAABjAAAAZAAAAGUAAABmAAAAZwAAAGgAAABpAAAAagAAAGsAAABsAAAAbQAAAG4AAABvAAAAcAAAAHEAAAByAAAAcwAAAHQAAAB1AAAAdgAAAHcAAAB4AAAAeQAAAHoAAAB7AAAAfAAAAH0AAAB+AAAAfwAAAIAAAACBAAAAggAAAIMAAACEAAAAhQAAAIYAAACHAAAAiAAAAIkAAACKAAAAiwAAAIwAAACNAAAAjgAAAI8AAACQAAAAkQAAAJIAAACTAAAAlAAAAJUAAACWAAAAlwAAAJgAAACZAAAAmgAAAJsAAACcAAAAnQAAAJ4AAACfAAAAoAAAAKEAAACiAAAAowAAAKQAAAClAAAApgAAAKcAAACoAAAAqQAAAKoAAACrAAAArAAAAK0AAACuAAAArwAAALAAAACxAAAAsgAAALMAAAC0AAAAtQAAALYAAAC3AAAAuAAAALkAAAC6AAAAuwAAALwAAAC9AAAAvgAAAL8AAADAAAAAwQAAAMIAAADDAAAAxAAAAMUAAADGAAAAxwAAAMgAAADJAAAAygAAAMsAAADMAAAAzQAAAM4AAADPAAAA0AAAANEAAADSAAAA0wAAANQAAADVAAAA1gAAANcAAADYAAAA2QAAANoAAADbAAAA3AAAAN0AAADeAAAA3wAAAOAAAADhAAAA4gAAAOMAAADkAAAA5QAAAOYAAADnAAAA6AAAAOkAAADqAAAA6wAAAOwAAADtAAAA7gAAAO8AAADwAAAA8QAAAPIAAADzAAAA9AAAAPUAAAD2AAAA9wAAAPgAAAD5AAAA+gAAAPsAAAD8AAAA/QAAAP4AAAD/AAAAAAEAAAEBAAACAQAAAwEAAAQBAAAFAQAABgEAAAcBAAAIAQAACQEAAAoBAAALAQAADAEAAA0BAAAOAQAADwEAABABAAARAQAAEgEAABMBAAAUAQAAFQEAABYBAAAXAQAAGAEAABkBAAAaAQAAGwEAABwBAAAdAQAAHgEAAB8BAAAgAQAAIQEAACIBAAAjAQAAJAEAACUBAAAmAQAAJwEAACgBAAApAQAAKgEAACsBAAAsAQAALQEAAC4BAAAvAQAAMAEAADEBAAAyAQAAMwEAADQBAAA1AQAANgEAADcBAAA4AQAAOQEAADoBAAA7AQAAPAEAAD0BAAA+AQAAPwEAAEABAABBAQAAQgEAAEMBAABEAQAARQEAAEYBAABHAQAASAEAAEkBAABKAQAASwEAAEwBAABNAQAATgEAAE8BAABQAQAAUQEAAFIBAABTAQAAVAEAAFUBAABWAQAAVwEAAFgBAABZAQAAWgEAAFsBAABcAQAAXQEAAF4BAABfAQAAYAEAAGEBAABiAQAAYwEAAGQBAABlAQAAZgEAAGcBAABoAQAAaQEAAGoBAABrAQAAbAEAAG0BAABuAQAAbwEAAHABAABxAQAAcgEAAHMBAAB0AQAAdQEAAHYBAAB3AQAAeAEAAHkBAAB6AQAAewEAAHwBAAB9AQAAfgEAAH8BAAD+////gQEAAIIBAACDAQAAhAEAAIUBAACGAQAAhwEAAP7////9/////f////3////9////jQEAAP7////+/////v////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////9SAG8AbwB0ACAARQBuAHQAcgB5AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFgAFAf//////////AwAAAAYJAgAAAAAAwAAAAAAAAEYAAAAAAAAAAAAAAAAgFZlgpnrRAY8BAACAAAAAAAAAADEAVABhAGIAbABlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAIB/////wUAAAD/////AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAB4aAAAAAAAAVwBvAHIAZABEAG8AYwB1AG0AZQBuAHQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABoAAgEBAAAA//////////8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAFAFMAdQBtAG0AYQByAHkASQBuAGYAbwByAG0AYQB0AGkAbwBuAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKAACAQIAAAAEAAAA/////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABYAAAAM0wIAAAAAAAUARABvAGMAdQBtAGUAbgB0AFMAdQBtAG0AYQByAHkASQBuAGYAbwByAG0AYQB0AGkAbwBuAAAAAAAAAAAAAAA4AAIB////////////////AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAEAAAAQAAAAAAAAAQBDAG8AbQBwAE8AYgBqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABIAAgD//
/////////////8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP///////////////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA////////////////AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAP7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////8BAP7/AwoAAP////8GCQIAAAAAAMAAAAAAAABGIAAAAERvY3VtZW50IE1pY3Jvc29mdCBXb3JkIDk3LTIwMDQACgAAAE1TV29yZERvYwAQAAAAV29yZC5Eb2N1bWVudC44APQ5snEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" } + + - do: + get: + index: test + type: test + id: 1 + - length: { _source.attachment: 6 } + - match: { _source.attachment.content: "Test elasticsearch" } + - match: { _source.attachment.language: "et" } + - match: { _source.attachment.author: "David Pilato" } + - match: { _source.attachment.date: "2016-03-10T08:25:00Z" } + - match: { _source.attachment.content_length: "19" } + - match: { _source.attachment.content_type: "application/msword" } + + +--- +"Test ingest attachment processor with .docx file": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "attachment" : { + "source_field" : "field1" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline" + body: { field1: 
"UEsDBBQABgAIAAAAIQBtiidLZgEAAFQFAAATAAgCW0NvbnRlbnRfVHlwZXNdLnhtbCCiBAIooAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC0lMtugzAQRfeV+g/I2wqcdFFVVUgWfSzbSE0/wLEH4tYv2c7r7ztAgqooAalJNkgwc+89A3hGk41WyQp8kNbkZJgNSAKGWyFNmZOv2Vv6SJIQmRFMWQM52UIgk/HtzWi2dRASVJuQk0WM7onSwBegWcisA4OVwnrNIt76kjrGf1gJ9H4weKDcmggmprHyIOPRCxRsqWLyusHHDQnKSfLc9FVROWHOKclZxDKtqvSozoMKHcKVEQd06Y4sQ2XdExbShbvTCd8OyoMEqavR6gJqPvB1eikgmTIf35nGBrq2XlBh+VKjKOse7gijLQrJodVXbs5bDiHgd9IqayuaSbNnP8kR4lZBuDxF49sfDzGi4BoAO+dehDXMP69G8ce8F6TA3BmbK7g8RmvdCxHx1EJzHZ7NUdt0RWLn1FsXcAv4f4y9P66VOsWBHfgou/+6NhGtz54Pqk0gQBzJpvVOHP8CAAD//wMAUEsDBBQABgAIAAAAIQDHwie8/wAAAN8CAAALAAgCX3JlbHMvLnJlbHMgogQCKKAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAArJLNSgMxEIDvgu8Q5t7NtoqINNuLCL2JrA8wJtPd6OaHZKrt2xtF1IVlEexx/j6+SWa9ObhBvFLKNngFy6oGQV4HY32n4LG9W1yDyIze4BA8KThShk1zfrZ+oAG5DOXexiwKxWcFPXO8kTLrnhzmKkTypbILySGXMHUyon7BjuSqrq9k+s2AZsQUW6Mgbc0FiPYY6X9s6YjRIKPUIdEipjKd2JZdRIupI1Zggr4v6fzZURUyyGmhy78Lhd3OaroNeu/I85QXHZi8ITOvhDHOGS1PaTTu+JF5C8lI85Wes1md9sO437snj3aYeJfvWvUcqfsQkqOzbN4BAAD//wMAUEsDBBQABgAIAAAAIQATqj6H9gAAADEDAAAcAAgBd29yZC9fcmVscy9kb2N1bWVudC54bWwucmVscyCiBAEooAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKySy2rDMBBF94X+g5h9LTt9UELkbEoh29b9AEUeP6gsCc304b+vaEjr0GC68PJeMfeeQbPZfg5WvGOk3jsFRZaDQGd83btWwUv1eHUPgli7WlvvUMGIBNvy8mLzhFZzGqKuDyRSiiMFHXNYS0mmw0FT5gO69NL4OGhOMrYyaPOqW5SrPL+TcZoB5Umm2NUK4q6+BlGNAf+T7ZumN/jgzduAjs9UyA/cPyNzWo5SrI4tsoKJmaVEkOdBbpYEabzjSu8t/mL8WHMQt0tCcJqdAHzLg1nMMRRLMhCPFiefcdBz9atF6/9cw9E5IsiTQy+/AAAA//8DAFBLAwQUAAYACAAAACEA9WKOYGUCAAAOBwAAEQAAAHdvcmQvZG9jdW1lbnQueG1spFXfb9owEH6ftP8h8jtNwijQiFDR0qI+TKpK9zwZx0ksYp9lGyj763dOIGSbVtGSh9j367vv7mJncvsmq2DLjRWgUhJfRSTgikEmVJGSH6+PvTEJrKMqoxUonpI9t+R2+vXLZJdkwDaSKxcghLLJTrOUlM7pJAwtK7mk9koKZsBC7q4YyBDyXDAe7sBkYT+Ko3qnDTBuLea7p2pLLTnASTgPTVJ23PajaIyyUC3Gv4xAc4XGHIykDkVTYIRZb3QPMTV1YiUq4fYea9jCbFOyMSo5YPRaHj4mQQLJVlZHZ3jPtyF6WI4R5hySTcj80PKaXmh4hYRB2VLoU98+i4bG8gjybsGdYnc6Hlw29LmhO1xOgOfQz5ogWTXM30eMozMm4iHaiHMo/JnzyKT78e0+15pOc+
PrjwH0/wbQxWXDWRjY6BOauAztSa1bLH+VfADrMORuafYyMsuSajyBkiVPhQJDVxUywpEF2PXAf9ZkilfcCrK9XzWqB4mmhj5lKRmNhg/X9/GI1FrH31yjbR7UJnidZi8piaK7m8Hw5rpVzXlON5XzlvEwGs8f6yzGv9z0lVsX4JG2TjDLqWHlJPR6/65dVgBrf1ktHTUOIQVmjTy2ohLZ/1zAHWVrEnZ9H1TWeoY1lPZmy5l7Nv9nukS7185m8WjW9EIXy19oxdMRxzdRnbfE/XA8qJG9w3fqIR3gIY4HdX8SI4rSncQVOAfyJFc871hLTjOO1+EoGnsxB3Adsdi4WjykY1BZ1FpNGW98ajX+lRZG+KIrofizcAxZfhseq28Kr7fNcMPTj2z6GwAA//8DAFBLAwQUAAYACAAAACEAbU1ZqyEGAACOGgAAFQAAAHdvcmQvdGhlbWUvdGhlbWUxLnhtbOxZy47bNhTdF+g/ENo7lm3Jj0E8gS3bSZuZJMg4abOkJVpihhINkpoZIwjQLyhQIC26KdBdC3QToP2D/kuKNv2IUpRlkzbdQToOEBSxAYuPcy8P7yUPJev2nauUgAvEOKZZ32ncch2AspBGOIv7zpPppNZ1ABcwiyChGeo7S8SdO8effnIbHokEpQhI+4wfwb6TCLE4qtd5KJshv0UXKJN9c8pSKGSVxfWIwUvpNyX1puu26ynEmQMymEq30+T3n6Wzh/M5DpFzXHkfE/mTCV40hISdFb7RymSYMwRzhY3OG8WFL3lAGLiApO/IgSJ6OUVXwgEEciE7+o6rPk79+HZ9bUTEHlvNbqI+K7uVQXTeVHYsnq0NPc/32oO1fwUgYhc37ozb4/banwLAMJQzLbnoWH/YG478FVYDlUWL71Fn1GoYeM1/awc/8IuvgVegsujt4CeTYBNDDVQWfUtMOs3AM/AKVBbbO/iOOxh5HQOvQAnB2fkO2vXbraCa7Royp+SeFd7zvUmnuYJvUHVtdZX2mdi31lL4nLKJBKjkQoEzIJYLNIehxAWQ4BnD4ATHiVx4C5hRLpvdpjtxW/K3+HqqpCICjxDUrMumkO80FXwADxleiL7zufTqaJBnObhLRYLD1ai7FvdgFusWb3/65u8fvgJ//frj21ff2vFcx49QFn+JYfZvAwjd4M13r//47fWb77/+85dXFviAwZkOn+IUcfAAXYLHNJWTswyAZuzdLKYJxLrFIIs5zGBhY0GPZfx09IMlJNCCGyIzkk+ZlAob8G7+3CB8lrBcYAvwfpIawFNKyZAy65zuF2PpUciz2D44y3XcYwgvbGMHW3ke5wu55rHNZZAgg+YjIlMOY5QhAYo+eo6QxewZxkZcT3HIKKdzAZ5hMITYGpIpnhmraWN0D6cyL0sbQZlvIzanT8GQEpv7EbowkXJ3QGJziYgRxrswFzC1MoYp0ZEnUCQ2kmdLFhoB50JmOkaEgnGEOLfZPGRLg+59KTH2tJ+SZWoimcDnNuQJpNTY4PQ8SGC6sHLGWaJjP+PncolC8IgKKwlq7pCiLvMgxWNfup9iZKT7+r39RMqQfYEUPTmzbQlEzf24JHOIlPP6lqanOLtW4Lek3X9/0n6KszChds09iKjboTeR8wHD1v20LeL7cNvSHVAW4Q9fuUcwzx4huVks0I/C/VG4//fCvW8/H16uNwqtbuOrm3XlJt175z7HhJyJJUEnXGk7l9OLJrJRVZTR+kFhkcjiajgDFzOoyoBR8QUWyVkCF3KYhhoh5ivXMQcLyuXpoJqtvosOkqenNCpbG43q2VQaQLFpl6dL1S7PIlG2tjubh7C1e1WL1cNyRaCwfRcS2mAmiZaFRKdqvIaEmtlBWPQsLLqF+70s1GWVFbn/ACz+1/C9kpFcb5CgqMhTaV9l9+CZ3hdMc9pNy/R6BdfDZNogoS03k4S2DBMYoe3mA+e6t0mpQa8IxS6NTvd95LoQkS1tIJlZA5dyz7V86SaEi74zl/eFspgupD9e6CYkcdZ3QrEK9H9RlgXjYgR5UsJUVzn/FAvEAMGpXOt6Gki24dZodoo5fqDkeu6HFzl10ZOM5nMUij0tm6rsK51Ye28ILio0l6TPkugSzEjOHkMZKL/TKAIYYS7W0Yww0xb3JopbcrXaisZ/ZpstCskigasTRRfzEq7KazraPBTT7VmZ9dVkZnGRpBufutcbFR2aaO45QIpT064f7++Q11htdN9gVUr3ttb1Kq3bd0rc/EDQqG0GM6gVjC3UNq0mtQPeEGjDrZfmvjPi0KfB9qotDojqvlLVdl5O0NlzufJH8nY1J4IrquhKPiME1d/KpRKo1kpdrgTIGe47L1x/4AVNP6i5XX9c81qeW+v6g1Zt4PutxthvuKNh86UMikjShl+OPZHPM2S5evmi2ndewKTVbfatkKZ1qt6s1JWxegHTaBovYMo3L2Ba9DsAy8i8aDcnvVZv2K71WoNJzRsNu7Ve0B7WRu2gM5qMAr/bm7x0wIUCe4NW4LXH3Vq7EQQ1r+0W9Lu9WsdrNgdeZ9Ade4OXq1jLmVfXKryK1/E/AAAA//8DAFBLAwQKAAAAAAAAACEAvOgH/fQnAAD0JwAAFwAAAGRvY1Byb3BzL3RodW1ibmFpbC5qcGVn/9j/4AAQSkZJRgABAQAASABIAAD/4QCARXhpZgAATU0AKgAAAAgABAEaAAUAAAABAAAAPgEbAAUAAAABAAAARgEoAAMAAAABAAIAAIdpAAQAAAABAAAATgAAAAAAAABIAAAAAQAAAEgAAAABAAOgAQADAAAAAQABAACgAgAEAAAAAQAAAWmgAwAEAAAAAQAAAgAAAAAA/+0AOFBob3Rvc2hvcCAzLjAAOEJJTQQEAAAAAAAAOEJJTQQlAAAAAAAQ1B2M2Y8AsgTpgAmY7PhCfv/AABEIAgABaQMBEQACEQEDEQH/xAAfAAABBQEBAQEBAQAAAAAAAAAAAQIDBAUGBwgJCgv/xAC1EAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy8/T19vf4+fr/xAAfAQADAQEBAQEBAQEBAAAAAAAAAQIDBAUGBwgJCgv/xAC1EQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBA
QEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/2wBDAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/3QAEAC7/2gAMAwEAAhEDEQA/AP7Yfgx8GPg9N8HvhRLL8KPhrLLL8NfAskkkngTws8kkj+F9LZ3d200s7uxLMzHczEk5JNAHpX/ClPg3/wBEl+GX/hBeFf8A5W0AH/ClPg3/ANEl+GX/AIQXhX/5W0AH/ClPg3/0SX4Zf+EF4V/+VtAB/wAKU+Df/RJfhl/4QXhX/wCVtAB/wpT4N/8ARJfhl/4QXhX/AOVtAB/wpT4N/wDRJfhl/wCEF4V/+VtAB/wpT4N/9El+GX/hBeFf/lbQAf8AClPg3/0SX4Zf+EF4V/8AlbQAf8KU+Df/AESX4Zf+EF4V/wDlbQAf8KU+Df8A0SX4Zf8AhBeFf/lbQAf8KU+Df/RJfhl/4QXhX/5W0AH/AApT4N/9El+GX/hBeFf/AJW0AH/ClPg3/wBEl+GX/hBeFf8A5W0AH/ClPg3/ANEl+GX/AIQXhX/5W0AH/ClPg3/0SX4Zf+EF4V/+VtAB/wAKU+Df/RJfhl/4QXhX/wCVtAB/wpT4N/8ARJfhl/4QXhX/AOVtAB/wpT4N/wDRJfhl/wCEF4V/+VtAB/wpT4N/9El+GX/hBeFf/lbQAf8AClPg3/0SX4Zf+EF4V/8AlbQAf8KU+Df/AESX4Zf+EF4V/wDlbQAf8KU+Df8A0SX4Zf8AhBeFf/lbQAf8KU+Df/RJfhl/4QXhX/5W0AH/AApT4N/9El+GX/hBeFf/AJW0AH/ClPg3/wBEl+GX/hBeFf8A5W0AH/ClPg3/ANEl+GX/AIQXhX/5W0AH/ClPg3/0SX4Zf+EF4V/+VtAB/wAKU+Df/RJfhl/4QXhX/wCVtAB/wpT4N/8ARJfhl/4QXhX/AOVtAB/wpT4N/wDRJfhl/wCEF4V/+VtAB/wpT4N/9El+GX/hBeFf/lbQAf8AClPg3/0SX4Zf+EF4V/8AlbQAf8KU+Df/AESX4Zf+EF4V/wDlbQAf8KU+Df8A0SX4Zf8AhBeFf/lbQAf8KU+Df/RJfhl/4QXhX/5W0AH/AApT4N/9El+GX/hBeFf/AJW0AH/ClPg3/wBEl+GX/hBeFf8A5W0AH/ClPg3/ANEl+GX/AIQXhX/5W0AH/ClPg3/0SX4Zf+EF4V/+VtAB/wAKU+Df/RJfhl/4QXhX/wCVtAB/wpT4N/8ARJfhl/4QXhX/AOVtAB/wpT4N/wDRJfhl/wCEF4V/+VtAB/wpT4N/9El+GX/hBeFf/lbQAf8AClPg3/0SX4Zf+EF4V/8AlbQB/Nd/wrT4c/8ARP8AwT/4Sug//INAH//Q/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9H+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/T/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9T+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAeDfEX4/+Hvhf8QfD3gbxN4W8Vx6ZrfgHxz8TNQ+JS33w9sPh34O8G/DOXRk8fav4uvfEHj3RPE1jD4Xt/Evhu/v307wrq0M1hrUU+ny3Z03Xk0oAxf8Ahrr9n+W68P2WneN7nXLrxJ4x0D4f2sHh7wj411x9L8aeI9T13RrPwx4qGmeHrr/hD9esNV8Na1YeJNG8Uto+q+Eriz2+KLLRxNbvKAXNd/am+C2h+MbHwIPFDa34kn8V6n4Q1Ox8OWU+sSaBf6R8P/iz8Q7+7v4YALrVNPt7T4K+PPCs58JW/ia/tfH2nf8ACHXun22rW2qRaWAZN7+2L+z3bRaLNY+N5/ECa/4p0XwRYyeG/DPinWLW38XeIPh9r/xR07w7rupW+jHSvC2qQ+B/Dt5r2vweJ77SB4Ns7zRZvGjeHoNc0qa6ANXSP2r/ANn7WUia2+JWhxg6X4r1q7mlNxLpWl6R4Asbe7+IWsaj4lsobvwrD4f+H91dQeHvGvimLXZ/C3hzxhJH4O1LWovE8sWkOANvf2s/2eNN06x1bUviZpmnadqdvey6fdahpXiSyS7v9N8cH4a6l4YgW50aGR/HenePlfwnf/D0L/wnNlrUctnc+HonikKgGaP2wfgC/wARfCHwzh8arNrHje68eaVoWsLYXkXhaTxJ8O/Gfw8+Hmu+GJtcuY4Ihqs/jj4m+HPB+l3MME+g3njFbzwS+tW/jP8As/QL8A+nKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA/mXoA//1f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQB5b8Sfgr8MPi/bahZ/EfwrB4otNV+HPxJ+Emo2t1qGsWltefDz4vW2gWfxF8OTw6ZqNlG0XiO18MaJDJf7Rq2mrZE6Pf6c11etcAHk2k/sUfs26Fq/hbXtJ8DavYaz4JstA07wrqNt8R/ifHc6LY+G/Gcfj/TbS0ZfGKqLZ/FKTX2p28ivBrFpqGs6LqsV3omu65p2oAEGtfsOfsueILvx9fal8MAbj4neIfEXivxo1j4y8f6Qmp+IvF3hL4ieCPE+rW0Ok+KLKDRbjXfDvxY+IUOprocenQXWpeJbrxBJEfEFtp+p2gBQ0H9gr9lfwv8AYz4f+HWraW2nW/hSx06S2+J3xZ86w0/wXbeLLLQ9MtLiTx01xBpf2Dx54107WdMSQWHiTT/FWvWXiKDVLbVLtJQB17+wd+y1qWhWfhXU/h9rOqeEdPvfH9/p3g7VPij8W9S8H6dc/FLRPEnh/wCIT6f4WvvHU2g2K+LNM8Y+K01aO10+FJbrxFrGoIqX17PcMAV9M/YB/ZM0jxR4H8Z2PwtuF8S/Di38NWvgzUZ/iF8T7saNF4R+Is3xY0DFjc+M5tNv5LP4gTvr8s2p2l5JfYTS79rnRkTT6AO9sf2UfgVp97a30PhPWJ5dP8S3vizR4NR+IPxI1XTvD+rah8V/AHxwuYvDek6n
4vvNL8OaGPin8MPBPiu28LaJZ2Hhmxk0iXSbHSbfQNX1rS9SAPoqgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAP5l6AP/W/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9f+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/R/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9L+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/U/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9X+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/X/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9D+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/S/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9P+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/V/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9b+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/Q/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9H+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/T/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9T+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/W/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUA
FABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9f+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/R/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9L+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/U/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9X+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/X/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9D+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/S/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9P+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/V/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9b+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/Q/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9H+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/T/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9T+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/W/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9f+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAF
ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/R/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9L+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/U/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9X+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1v7Yfgx8Z/g9D8HvhRFL8V/hrFLF8NfAsckcnjvwskkcieF9LV0dG1IMjowKsrDcrAg4INAHpX/C6/g3/wBFa+GX/he+Ff8A5ZUAH/C6/g3/ANFa+GX/AIXvhX/5ZUAH/C6/g3/0Vr4Zf+F74V/+WVAB/wALr+Df/RWvhl/4XvhX/wCWVAB/wuv4N/8ARWvhl/4XvhX/AOWVAB/wuv4N/wDRWvhl/wCF74V/+WVAB/wuv4N/9Fa+GX/he+Ff/llQAf8AC6/g3/0Vr4Zf+F74V/8AllQAf8Lr+Df/AEVr4Zf+F74V/wDllQAf8Lr+Df8A0Vr4Zf8Ahe+Ff/llQAf8Lr+Df/RWvhl/4XvhX/5ZUAH/AAuv4N/9Fa+GX/he+Ff/AJZUAH/C6/g3/wBFa+GX/he+Ff8A5ZUAH/C6/g3/ANFa+GX/AIXvhX/5ZUAH/C6/g3/0Vr4Zf+F74V/+WVAB/wALr+Df/RWvhl/4XvhX/wCWVAB/wuv4N/8ARWvhl/4XvhX/AOWVAB/wuv4N/wDRWvhl/wCF74V/+WVAB/wuv4N/9Fa+GX/he+Ff/llQAf8AC6/g3/0Vr4Zf+F74V/8AllQAf8Lr+Df/AEVr4Zf+F74V/wDllQAf8Lr+Df8A0Vr4Zf8Ahe+Ff/llQAf8Lr+Df/RWvhl/4XvhX/5ZUAH/AAuv4N/9Fa+GX/he+Ff/AJZUAH/C6/g3/wBFa+GX/he+Ff8A5ZUAH/C6/g3/ANFa+GX/AIXvhX/5ZUAH/C6/g3/0Vr4Zf+F74V/+WVAB/wALr+Df/RWvhl/4XvhX/wCWVAB/wuv4N/8ARWvhl/4XvhX/AOWVAB/wuv4N/wDRWvhl/wCF74V/+WVAB/wuv4N/9Fa+GX/he+Ff/llQAf8AC6/g3/0Vr4Zf+F74V/8AllQAf8Lr+Df/AEVr4Zf+F74V/wDllQAf8Lr+Df8A0Vr4Zf8Ahe+Ff/llQAf8Lr+Df/RWvhl/4XvhX/5ZUAH/AAuv4N/9Fa+GX/he+Ff/AJZUAH/C6/g3/wBFa+GX/he+Ff8A5ZUAH/C6/g3/ANFa+GX/AIXvhX/5ZUAH/C6/g3/0Vr4Zf+F74V/+WVAB/wALr+Df/RWvhl/4XvhX/wCWVAB/wuv4N/8ARWvhl/4XvhX/AOWVAB/wuv4N/wDRWvhl/wCF74V/+WVAB/wuv4N/9Fa+GX/he+Ff/llQAf8AC6/g3/0Vr4Zf+F74V/8AllQB/Nd/wsv4c/8ARQPBP/hVaD/8nUAf/9kAAFBLAwQUAAYACAAAACEAuN5y8JsDAACACQAAEQAAAHdvcmQvc2V0dGluZ3MueG1stFZLj9s2EL4X6H8wdK5Wj8iOV403sL1xs8E6WazcS2+URNnE8iEMKatO0f/eESWunGYRuA3ii8n55s1vxn7z9k/BJ0cKmim58KKr0JtQWaiSyf3C+3238efeRBsiS8KVpAvvRLX39ubnn960qabGoJqeoAupU1EsvIMxdRoEujhQQfSVqqlEsFIgiMEr7ANB4Kmp/UKJmhiWM87MKYjDcOYNbtTCa0CmgwtfsAKUVpXpTFJVVaygw5ezgEvi9ia3qmgElcZGDIByzEFJfWC1dt7E//WG4ME5OX6riKPgTq+NwgvKbRWUzxaXpNcZ1KAKqjU+kOAuQSbHwMlXjp5jX2HsoUTrCs2j0J7OM5/+NwfxvxxofkklPXTPciDQ82QoQxTp3V4qIDlHVmI5E8zIu0FaflZKTNq0plDg2yCnw9ALOgA7oqrMEEMR1jXl3JK84JSgwzbdAxFITyexNiWtSMPNjuSZUTUqHQnm/Tqc9/DhVB+otCT6A8fD4Uk87fHiQIAUhkJWkwKjrZU0oLjTK9VHZdY4CoAv1VtocqQPQI+Mtg+sMA3Q3pGdl/GU9bOHjiQR2IAv5mmrStoV1AC7/I06A5tU5HJ/MZDCXQGspLuu8Zk5cbrBmjL2mS5l+aHRhqFH25DvyOBbCWC7MfInpMruVNMNJV2P9A8KZh9ow1m9ZQAK7mSJlPphwVhVUcAADCm6RdYxUK3t83tKSlzR3xk3OKcRLvxSu8OjUsaphuF8Fs5vN32mHXoJslxGr5fJS8jqOpldW0oFz1FF2i3LB3CnjkIT0VusiciBkcm2W6dBp5HD04pJh+cU9wM9R7Imd6Dv94AWhPMNjp4D7AoQacl0fUsre+ZbAvvR76ABL0pxDXx49tWtFQq/gWrqHm2B1D01nEqUJIMlk+aeCSfXTZ45K4kb7QxqZPnpCLZPY3va1OAT2xG7J5YqVrcCf/M4UIlD1tGAbkld92zK99HC42x/MFFHAIO3En917SXfxwMWWyzuMXshRVcZag+HURY72ZneKyd7NcoSJ0tG2dTJpqNs5mSzToZLlALu4icktjt28kpxrlpavh/xr0RuSxcMXzw7iXxcrr/0GGcaJ63GPWwUOOxXi0VJWqriDsmKp/655+t3yTxa9vDU7m+zQx49YWsfabUimpYD5kynvelfm+4zj1f+MrqN/WQ2XfnzeP3OX23iZbReXs+m6/jvYQ7cX6ebfwAAAP//AwBQSwMEFAAGAAgAAAAhAPC8NQHcAQAA8QUAABIAAAB3b3JkL2ZvbnRUYWJsZS54bWy8k9tq4zAQhu8LfQej+8ay4vRg6pQ0bWBh6cXSfQBFkW2xOhhJiTdvvyPZcQMhbJallUHI/4x+jT40j0+/lUx23DphdImyCUYJ18xshK5L9PN9dXOPEuep3lBpNC/Rnjv0NL++euyKymjvE
tivXaFYiRrv2yJNHWu4om5iWq4hWBmrqIdfW6eK2l/b9oYZ1VIv1kIKv08JxrdosLGXuJiqEoy/GLZVXPu4P7VcgqPRrhGtO7h1l7h1xm5aaxh3Du6sZO+nqNCjTZafGCnBrHGm8hO4zFBRtILtGY4rJT8MZv9mQEYDxYpvtTaWriXAh0oSMEPzgX7SFZoqCCypFGsrYqCl2jieQWxHZYkwwSs8gzl8OZ6GGaUhkTXUOh5M+kTcyxVVQu4PKt160+ut8Kw5yDtqRaipDzlRQ2Dr1rhErxgGWa1Qr2QlykFYLEeFhKPiyAZlOio4KCz69BkPcReLPmMOnJn2AE5AvAvFXfLGu+SHUVSfAULwLYCYAY4AZvr5QMji9QjIEpS7+/xw/Q8gD38H0mO8HMgCypJnMDwDhnx4GfF1fD6G43cxYJh+BYahQZLvom782TYJzfFFbbIIFZPjVxHahOC75xMc8fL/2SbDws3/AAAA//8DAFBLAwQUAAYACAAAACEA4IvKVR8BAAARAgAAFAAAAHdvcmQvd2ViU2V0dGluZ3MueG1slNFRS8MwEAfwd8HvUPK+pRs6tKwbgkz2MgbVD5Cl1zWY5EIua7dv71nnRHyZbzku9+P+3Hx5dDbrIJJBX4rJOBcZeI218ftSvL2uRg8io6R8rSx6KMUJSCwXtzfzvuhhV0FK/JMyVjwVTpeiTSkUUpJuwSkaYwDPzQajU4nLuJdOxfdDGGl0QSWzM9akk5zm+UycmXiNgk1jNDyjPjjwaZiXESyL6Kk1gb61/hqtx1iHiBqIOI+zX55Txl+Yyd0fyBkdkbBJYw5z3migeHySDy9nf4D7/wHTC+B0sd57jGpn+QS8ScaYWPANlLXYbzcv8rOocYOpUh08UcUpLKyMhaETzBEsbSGuvW6zvuiULcXjTHBT/jrk4gMAAP//AwBQSwMEFAAGAAgAAAAhABZNBGBtAQAA7wIAABEACAFkb2NQcm9wcy9jb3JlLnhtbCCiBAEooAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJySUW+CMBSF35fsP5C+Q4suxhDAZDM+zcRkLlv21rVX7YS2aavIv18BxbH5tLd7e757uJw2nZ3KIjiCsULJDMURQQFIpriQ2wy9rhfhFAXWUclpoSRkqAaLZvn9Xcp0wpSBlVEajBNgA+8kbcJ0hnbO6QRjy3ZQUht5Qnpxo0xJnW/NFmvK9nQLeETIBJfgKKeO4sYw1L0jOlty1lvqgylaA84wFFCCdBbHUYyvrANT2psDrfKDLIWrNdxEL2JPn6zowaqqomrcon7/GL8vn1/aXw2FbLJigPKUs8QJV0Ce4mvpK3v4/ALmuuO+8TUzQJ0y+ZweBQ9WovBdC12EJvI91JUy3PrxQecxDpYZoZ2/yM58cODpglq39De7EcAf61/f+as3IwaOonkZedwSfZueY+52Ax74eJIuzIvyNn6arxcoH5F4EpJxGJM1mSajh4SQj2a9wfzVsDwv8G/Hi0GX0PCJ5t8AAAD//wMAUEsDBBQABgAIAAAAIQCBlv05MgsAAGRyAAAPAAAAd29yZC9zdHlsZXMueG1svJ3bctu6FYbvO9N34OiqvXB8jJ14trPHduLaUzvbO3Kaa4iEJNQgofLgQ5++IEhJkBdBcQGrvrIlan0A8eMHsEBS+u33l1RGTzwvhMrORvsf9kYRz2KViGx2Nvr5cLXzaRQVJcsSJlXGz0avvBj9/uWvf/nt+bQoXyUvIg3IitM0PhvNy3JxurtbxHOesuKDWvBMH5yqPGWlfpnPdlOWP1aLnVilC1aKiZCifN092Ns7HrWYfAhFTaci5l9VXKU8K038bs6lJqqsmItFsaQ9D6E9qzxZ5CrmRaFPOpUNL2UiW2H2jwAoFXGuCjUtP+iTaWtkUDp8f8/8l8o14CMOcLACpPHpzSxTOZtI3fq6JpGGjb7o5k9U/JVPWSXLon6Z3+fty/aV+XOlsrKInk9ZEQvxoEvWkFRo3vV5VoiRPsJZUZ4XgnUenNf/dB6Ji9J6+0IkYrRbl1j8Vx98YvJsdHC0fOeyrsHGe5Jls+V703zn6oddk7MRz3Z+juu3Jpp7NmL5zvi8DtxtT6z5a53uYvWq+dSbttFdQ3eUcdNf9VE+vVXxI0/GpT5wNtqri9Jv/ry5z4XKdZ88G33+3L455qm4FknCM+uD2Vwk/NecZz8Lnqzf//PK9Kv2jVhVmf7/8NOe0UsWybeXmC/qXqqPZqxuve91gKw/XYl14Sb8P0vYfttmXfFzzmqrRvtvEab6KMRBHVFYZ9vNrN6cu/kUqqDD9yro6L0K+vheBR2/V0En71XQp/cqyGD+nwWJLOEvjRFhMYC6jeNwI5rjMBua4/ASmuOwCprjcAKa4+joaI6jH6M5jm6K4JQqdvVCq7MfOnp7P3f7HOHH3T4l+HG3zwB+3O0Dvh93+/jux90+nPtxt4/eftztgzWe2yy1ohtts6wMdtlUqTJTJY9K/hJOY5lmmfyFhldPejwnOUkCTDOytRNxMC1m5vX2HmJM6j+fl3XKFalpNBWzKtdpb2jFefbEpU5AI5YkmkcIzHlZ5Y4W8enTOZ/ynGcxp+zYdFApMh5lVToh6JsLNiNj8Swhbr4lkWRQWHVoVpXz2iSCoFOnLM5VeNUUIxsfbkUR3lY1JLqopORErO80XcywwnMDgwlPDQwmPDMwmPDEwNKMqolaGlFLtTSiBmtpRO3W9E+qdmtpRO3W0ojaraWFt9uDKKUZ4u1Vx/7wvbtLqeod5+B6jMUsY3oBED7dtHum0T3L2Sxni3lU7x93Y+1zxpZzoZLX6IFiTluRqNb1potc6rMWWRXeoBs0KnOteET2WvGIDLbihVvsTi+T6wXaNU0+M64mZadpDWmQacdMVs2CNtxtrAzvYWsDXIm8ILNBN5agB3+vl7O1nBQj37qW4RVbs8Jt9XZUIq1eiySopVTxI80wfP264LlOyx6DSVdKSvXMEzriuMxV09dsyx8YSQZZ/lu6mLNCmFxpAzF8ql9eq47u2CL4hO4lExmNbt92UiZkRLeCuH64u40e1KJOM+uGoQFeqLJUKRmz3Qn82y8++TtNBc91Epy9Ep3tOdH2kIFdCoJJpiGphIikl5kiEyRzqOH9k79OFMsTGtp9zpvbQ0pORByzdNEsOgi8pcfFZz3+EKyGDO9fLBf1vhCVqR5IYNa2YVFN/s3j8KHuu4pIdob+qEqz/2iW
uiaaDhe+TNjAhS8RjJp6eqj7L8HJbuDCT3YDR3Wyl5IVhXBeQvXmUZ3ukkd9vuHJX8tTUuXTStI14BJI1oJLIFkTKlmlWUF5xoZHeMKGR32+hF3G8Ai25AzvH7lIyMQwMColDIxKBgOj0sDASAUIv0PHgoXfpmPBwu/VaWBESwALRtXPSKd/oqs8FoyqnxkYVT8zMKp+ZmBU/ezwa8SnU70IpptiLCRVn7OQdBNNVvJ0oXKWvxIhv0k+YwQbpA3tPlfT+rkBlTU3cRMg6z1qSbjYbnBUIv/iE7Kq1SzKehHsiDIplSLaW1tPOCZy8961bWHmmYvgKpjN9lv+xClW4xaM6DJAAwuXzYKFT1MWLHyasmDh05QFC5+mLFj4NGXBwu9fvpcs5nMlE547jNhXkWi8YHF7bQlcox60V38rZvMyGs9Xl6hszPHe1sjlLtNG2PYCuwaK44OesDueiCpdVhQ+AXR8ODzYGHojePmgVk/wevm7EflxYCQs83h75Dq124g8GRgJy/w0MNKMUhuRfYP4V5Y/dnaEk77+s9qYcHS+k75etAruLLavI60iu7rgSV8v2rBKdB7H9SUuqM4wz7jjh5nHHY9xkZuCsZObMthXbkSfwX7wJ1EvRzGDpilvdcvP2+IOzZQ6aOT8s1LNxaaNq6TDn0S80av9rOBRJ+dw+NXWjVHG3Y6Dhxs3YvC440YMHoDciEEjkTMcNSS5KYPHJjdi8CDlRqBHKzgj4EYrGI8brWC8z2gFKT6jVcAqwI0YvBxwI9BGhQi0UQNWCm4Eyqgg3MuokII2KkSgjQoRaKPCBRjOqDAeZ1QY72NUSPExKqSgjQoRaKNCBNqoEIE2KkSgjeq5tneGexkVUtBGhQi0USECbVSzXgwwKozHGRXG+xgVUnyMCiloo0IE2qgQgTYqRKCNChFoo0IEyqgg3MuokII2KkSgjQoRaKM2z8f6GxXG44wK432MCik+RoUUtFEhAm1UiEAbFSLQRoUItFEhAmVUEO5lVEhBGxUi0EaFCLRRzaWDAKPCeJxRYbyPUSHFx6iQgjYqRKCNChFoo0IE2qgQgTYqRKCMCsK9jAopaKNCBNqoENHXP9vr6q5nQ/bxu57Ox0yGX7pqK/XD/v4BG3U4HLWslZs1/AGaC6Ueo86nZQ9NvjEMIiZSKLNF7bgXxOaaC6Soq/V/XPY/lmbTA78prH2Ax1zoB/CjoZFgT+Wor8vbkSDJO+rr6XYkWHUe9Y2+diSYBo/6Bl3jy+WdVHo6AsF9w4wVvO8I7xutrXDYxH1jtBUIW7hvZLYCYQP3jcdW4MeoHpzfRn8c2E7Hq5uiAaGvO1qEEzehr1tCrZbDMTTGUNHchKHquQlDZXQTUHo6MXhh3Si0wm6Un9TQZlip/Y3qJmClhgQvqQHGX2qI8pYaovykhgMjVmpIwErtPzi7CV5SA4y/1BDlLTVE+UkNpzKs1JCAlRoSsFIHTshOjL/UEOUtNUT5SQ0Xd1ipIQErNSRgpYYEL6kBxl9qiPKWGqL8pAZZMlpqSMBKDQlYqSHBS2qA8Zcaorylhqg+qc0uyobUKIWtcNwizArETchWIG5wtgI9siUr2jNbsgie2RLUaqk5LluyRXMThqrnJgyV0U1A6enE4IV1o9AKu1F+UuOypS6p/Y3qJmClxmVLTqlx2VKv1LhsqVdqXLbklhqXLXVJjcuWuqT2H5zdBC+pcdlSr9S4bKlXaly25JYaly11SY3LlrqkxmVLXVIHTshOjL/UuGypV2pctuSWGpctdUmNy5a6pMZlS11S47Ilp9S4bKlXaly21Cs1LltyS43LlrqkxmVLXVLjsqUuqXHZklNqXLbUKzUuW+qV2pEt7T5v/GpYzTa/d6c/XL4ueP3F8dYDM0nzxbntRUDzwZtk9etedXBdk6j9xbP2bVPh9oJhU6IJhEXFc11W3H7ll6OoeyWFPm+WJ/pwCYp0fLOvqcL65JefbhtzfRG0+dzGBc/eGpd1Y/fU1ojBqt72aRRzVfFz2wW31VHXaCKbH8PT/9xkiQY8t7+w1tQ1eWENSh+/5FLesebTauH+qOTTsjm6v2cen31zfNJ8YaEzPjeDhBOwu1mZ5mX7w3eOFm9+wqC9eu1o9fMqrjIutRt4R5ub+ylCm3tdweV/xZf/AQAA//8DAFBLAwQUAAYACAAAACEAQP7QLGkBAAC3AgAAEAAIAWRvY1Byb3BzL2FwcC54bWwgogQBKKAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACcUk1LxTAQvAv+h9K7L32CH8i+iCjiQUV4Vc8h2bbBNAnJKr5/78ZqrXgzp92ZZHZmCZy/j656w5Rt8Jt6vWrqCr0Oxvp+Uz+21wendZVJeaNc8Lipd5jrc7m/Bw8pRExkMVcs4fOmHojimRBZDziqvGLaM9OFNCriNvUidJ3VeBX064iexGHTHAt8J/QGzUGcBetJ8eyN/itqgi7+8lO7i6wnocUxOkUo78tLtzKBRhAzCm0g5Vo7omwYnht4UD1muQYxFfAcksnyEMRUwOWgktLE+5PrExCLFi5idFYr4sXKO6tTyKGj6k5p6ynkoSoKIJa3gENsUb8mS7viY9nCrfWTk6lgZ0n1ScXhy97cwVYrh5ccX3bKZQTxAxSVl/wY23BVYn/xv8FFpmdLwzYqXQafLtMtCNgyioa9zuNmAG54/ckVeX7rezTfd/4SZV9P0z+U66NVw+dzO98YZ5w/iPwAAAD//wMAUEsBAi0AFAAGAAgAAAAhAG2KJ0tmAQAAVAUAABMAAAAAAAAAAAAAAAAAAAAAAFtDb250ZW50X1R5cGVzXS54bWxQSwECLQAUAAYACAAAACEAx8InvP8AAADfAgAACwAAAAAAAAAAAAAAAACfAwAAX3JlbHMvLnJlbHNQSwECLQAUAAYACAAAACEAE6o+h/YAAAAxAwAAHAAAAAAAAAAAAAAAAADPBgAAd29yZC9fcmVscy9kb2N1bWVudC54bWwucmVsc1BLAQItABQABgAIAAAAIQD1Yo5gZQIAAA4HAAARAAAAAAAAAAAAAAAAAAcJAAB3b3JkL2RvY3VtZW50LnhtbFBLAQItABQABgAIAAAAIQBtTVmrIQYAAI4aAAAVAAAAAAAAAAAAAAAAAJsLAAB3b3JkL3RoZW1lL3RoZW1lMS54bWxQSwECLQAKAAAAAAAAACEAvOgH/fQnAAD0JwAAFwAAAAAAAAAAAAAAAAD
vEQAAZG9jUHJvcHMvdGh1bWJuYWlsLmpwZWdQSwECLQAUAAYACAAAACEAuN5y8JsDAACACQAAEQAAAAAAAAAAAAAAAAAYOgAAd29yZC9zZXR0aW5ncy54bWxQSwECLQAUAAYACAAAACEA8Lw1AdwBAADxBQAAEgAAAAAAAAAAAAAAAADiPQAAd29yZC9mb250VGFibGUueG1sUEsBAi0AFAAGAAgAAAAhAOCLylUfAQAAEQIAABQAAAAAAAAAAAAAAAAA7j8AAHdvcmQvd2ViU2V0dGluZ3MueG1sUEsBAi0AFAAGAAgAAAAhABZNBGBtAQAA7wIAABEAAAAAAAAAAAAAAAAAP0EAAGRvY1Byb3BzL2NvcmUueG1sUEsBAi0AFAAGAAgAAAAhAIGW/TkyCwAAZHIAAA8AAAAAAAAAAAAAAAAA40MAAHdvcmQvc3R5bGVzLnhtbFBLAQItABQABgAIAAAAIQBA/tAsaQEAALcCAAAQAAAAAAAAAAAAAAAAAEJPAABkb2NQcm9wcy9hcHAueG1sUEsFBgAAAAAMAAwABgMAAOFRAAAAAA==" } + + - do: + get: + index: test + type: test + id: 1 + - length: { _source.attachment: 6 } + - match: { _source.attachment.content: "Test elasticsearch" } + - match: { _source.attachment.language: "et" } + - match: { _source.attachment.author: "David Pilato" } + - match: { _source.attachment.date: "2016-03-10T08:24:00Z" } + - match: { _source.attachment.content_length: "19" } + - match: { _source.attachment.content_type: "application/vnd.openxmlformats-officedocument.wordprocessingml.document" } + diff --git a/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/10_basic.yaml b/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/10_basic.yaml index b522cb77780..cf86f4c7f4c 100644 --- a/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/10_basic.yaml +++ b/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/10_basic.yaml @@ -1,5 +1,25 @@ "Ingest plugin installed": - do: - cluster.stats: {} + cluster.state: {} - - match: { nodes.plugins.0.name: ingest-geoip } + - set: {master_node: master} + + - do: + nodes.info: {} + + - match: { nodes.$master.plugins.0.name: ingest-geoip } + - match: { nodes.$master.ingest.processors.0.type: append } + - match: { nodes.$master.ingest.processors.1.type: convert } + - match: { nodes.$master.ingest.processors.2.type: date } + - match: { nodes.$master.ingest.processors.3.type: fail } + - match: { nodes.$master.ingest.processors.4.type: foreach } + - match: { nodes.$master.ingest.processors.5.type: geoip } + - match: { nodes.$master.ingest.processors.6.type: gsub } + - match: { nodes.$master.ingest.processors.7.type: join } + - match: { nodes.$master.ingest.processors.8.type: lowercase } + - match: { nodes.$master.ingest.processors.9.type: remove } + - match: { nodes.$master.ingest.processors.10.type: rename } + - match: { nodes.$master.ingest.processors.11.type: set } + - match: { nodes.$master.ingest.processors.12.type: split } + - match: { nodes.$master.ingest.processors.13.type: trim } + - match: { nodes.$master.ingest.processors.14.type: uppercase } diff --git a/plugins/lang-javascript/build.gradle b/plugins/lang-javascript/build.gradle index dae5204db20..1f431241838 100644 --- a/plugins/lang-javascript/build.gradle +++ b/plugins/lang-javascript/build.gradle @@ -28,7 +28,7 @@ dependencies { integTest { cluster { - systemProperty 'es.script.inline', 'true' - systemProperty 'es.script.indexed', 'true' + setting 'script.inline', 'true' + setting 'script.indexed', 'true' } } diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java deleted file mode 100644 index 3445c116057..00000000000 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * 
license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.script.javascript; - -import org.elasticsearch.common.StopWatch; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.CompiledScript; -import org.elasticsearch.script.ExecutableScript; -import org.elasticsearch.script.ScriptService; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -/** - * - */ -public class SimpleBench { - - public static void main(String[] args) { - JavaScriptScriptEngineService se = new JavaScriptScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - Object compiled = se.compile("x + y", Collections.emptyMap()); - CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled); - - Map vars = new HashMap(); - // warm up - for (int i = 0; i < 1000; i++) { - vars.put("x", i); - vars.put("y", i + 1); - se.executable(compiledScript, vars).run(); - } - - final long ITER = 100000; - - StopWatch stopWatch = new StopWatch().start(); - for (long i = 0; i < ITER; i++) { - se.executable(compiledScript, vars).run(); - } - System.out.println("Execute Took: " + stopWatch.stop().lastTaskTime()); - - stopWatch = new StopWatch().start(); - ExecutableScript executableScript = se.executable(compiledScript, vars); - for (long i = 0; i < ITER; i++) { - executableScript.run(); - } - System.out.println("Executable Took: " + stopWatch.stop().lastTaskTime()); - - stopWatch = new StopWatch().start(); - executableScript = se.executable(compiledScript, vars); - for (long i = 0; i < ITER; i++) { - for (Map.Entry entry : vars.entrySet()) { - executableScript.setNextVar(entry.getKey(), entry.getValue()); - } - executableScript.run(); - } - System.out.println("Executable (vars) Took: " + stopWatch.stop().lastTaskTime()); - } -} diff --git a/plugins/lang-python/build.gradle b/plugins/lang-python/build.gradle index 0980d7f62c9..c7466316806 100644 --- a/plugins/lang-python/build.gradle +++ b/plugins/lang-python/build.gradle @@ -28,8 +28,8 @@ dependencies { integTest { cluster { - systemProperty 'es.script.inline', 'true' - systemProperty 'es.script.indexed', 'true' + setting 'script.inline', 'true' + setting 'script.indexed', 'true' } } diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java deleted file mode 100644 index d9559aef16c..00000000000 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.script.python; - -import org.elasticsearch.common.StopWatch; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.CompiledScript; -import org.elasticsearch.script.ExecutableScript; -import org.elasticsearch.script.ScriptService; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -/** - * - */ -public class SimpleBench { - - public static void main(String[] args) { - PythonScriptEngineService se = new PythonScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - Object compiled = se.compile("x + y", Collections.emptyMap()); - CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "SimpleBench", "python", compiled); - - - Map vars = new HashMap(); - // warm up - for (int i = 0; i < 1000; i++) { - vars.put("x", i); - vars.put("y", i + 1); - se.executable(compiledScript, vars).run(); - } - - final long ITER = 100000; - - StopWatch stopWatch = new StopWatch().start(); - for (long i = 0; i < ITER; i++) { - se.executable(compiledScript, vars).run(); - } - System.out.println("Execute Took: " + stopWatch.stop().lastTaskTime()); - - stopWatch = new StopWatch().start(); - ExecutableScript executableScript = se.executable(compiledScript, vars); - for (long i = 0; i < ITER; i++) { - executableScript.run(); - } - System.out.println("Executable Took: " + stopWatch.stop().lastTaskTime()); - - stopWatch = new StopWatch().start(); - executableScript = se.executable(compiledScript, vars); - for (long i = 0; i < ITER; i++) { - for (Map.Entry entry : vars.entrySet()) { - executableScript.setNextVar(entry.getKey(), entry.getValue()); - } - executableScript.run(); - } - System.out.println("Executable (vars) Took: " + stopWatch.stop().lastTaskTime()); - } -} diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java index ab50d44a5c7..cf5a0cf41d7 100644 --- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java +++ b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -71,9 +72,12 @@ import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; public class AttachmentMapper extends FieldMapper { private static ESLogger logger = ESLoggerFactory.getLogger("mapper.attachment"); - public static 
final Setting INDEX_ATTACHMENT_IGNORE_ERRORS_SETTING = Setting.boolSetting("index.mapping.attachment.ignore_errors", true, false, Setting.Scope.INDEX); - public static final Setting INDEX_ATTACHMENT_DETECT_LANGUAGE_SETTING = Setting.boolSetting("index.mapping.attachment.detect_language", false, false, Setting.Scope.INDEX); - public static final Setting INDEX_ATTACHMENT_INDEXED_CHARS_SETTING = Setting.intSetting("index.mapping.attachment.indexed_chars", 100000, false, Setting.Scope.INDEX); + public static final Setting INDEX_ATTACHMENT_IGNORE_ERRORS_SETTING = + Setting.boolSetting("index.mapping.attachment.ignore_errors", true, Property.IndexScope); + public static final Setting INDEX_ATTACHMENT_DETECT_LANGUAGE_SETTING = + Setting.boolSetting("index.mapping.attachment.detect_language", false, Property.IndexScope); + public static final Setting INDEX_ATTACHMENT_INDEXED_CHARS_SETTING = + Setting.intSetting("index.mapping.attachment.indexed_chars", 100000, Property.IndexScope); public static final String CONTENT_TYPE = "attachment"; diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/TikaImpl.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/TikaImpl.java index fa9a2d06f8e..2babda8ad00 100644 --- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/TikaImpl.java +++ b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/TikaImpl.java @@ -137,6 +137,8 @@ final class TikaImpl { perms.add(new SecurityPermission("putProviderProperty.BC")); perms.add(new SecurityPermission("insertProvider")); perms.add(new ReflectPermission("suppressAccessChecks")); + // xmlbeans, used by POI, needs to get the context classloader + perms.add(new RuntimePermission("getClassLoader")); perms.setReadOnly(); return perms; } diff --git a/plugins/mapper-attachments/src/main/plugin-metadata/plugin-security.policy b/plugins/mapper-attachments/src/main/plugin-metadata/plugin-security.policy index e23e9f4d0cf..adf76991b59 100644 --- a/plugins/mapper-attachments/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/mapper-attachments/src/main/plugin-metadata/plugin-security.policy @@ -27,4 +27,6 @@ grant { permission java.security.SecurityPermission "insertProvider"; // TODO: fix POI XWPF to not do this: https://bz.apache.org/bugzilla/show_bug.cgi?id=58597 permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + // needed by xmlbeans, as part of POI for MS xml docs + permission java.lang.RuntimePermission "getClassLoader"; }; diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/StandaloneRunner.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/StandaloneRunner.java deleted file mode 100644 index 03c6e65047a..00000000000 --- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/StandaloneRunner.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.mapper.attachments; - -import org.apache.commons.cli.CommandLine; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolConfig; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.env.Environment; -import org.elasticsearch.index.MapperTestUtils; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.DocumentMapperParser; -import org.elasticsearch.index.mapper.ParseContext; - -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Locale; - -import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd; -import static org.elasticsearch.common.cli.CliToolConfig.Builder.option; -import static org.elasticsearch.common.io.Streams.copy; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.mapper.attachments.AttachmentUnitTestCase.getIndicesModuleWithRegisteredAttachmentMapper; -import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; - -/** - * This class provides a simple main class which can be used to test what is extracted from a given binary file. 
- * You can run it using - * -u file://URL/TO/YOUR/DOC - * --size set extracted size (default to mapper attachment size) - * BASE64 encoded binary - * - * Example: - * StandaloneRunner BASE64Text - * StandaloneRunner -u /tmp/mydoc.pdf - * StandaloneRunner -u /tmp/mydoc.pdf --size 1000000 - */ -@SuppressForbidden(reason = "commandline tool") -public class StandaloneRunner extends CliTool { - - private static final CliToolConfig CONFIG = CliToolConfig.config("tika", StandaloneRunner.class) - .cmds(TikaRunner.CMD) - .build(); - - static { - System.setProperty("es.path.home", "/tmp"); - } - - static class TikaRunner extends Command { - private static final String NAME = "tika"; - private final String url; - private final Integer size; - private final String base64text; - private final DocumentMapper docMapper; - - private static final CliToolConfig.Cmd CMD = cmd(NAME, TikaRunner.class) - .options(option("u", "url").required(false).hasArg(false)) - .options(option("t", "size").required(false).hasArg(false)) - .build(); - - protected TikaRunner(Terminal terminal, String url, Integer size, String base64text) throws IOException { - super(terminal); - this.size = size; - this.url = url; - this.base64text = base64text; - DocumentMapperParser mapperParser = MapperTestUtils.newMapperService(PathUtils.get("."), Settings.EMPTY, getIndicesModuleWithRegisteredAttachmentMapper()).documentMapperParser(); // use CWD b/c it won't be used - - String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/attachment/test/standalone/standalone-mapping.json"); - docMapper = mapperParser.parse("person", new CompressedXContent(mapping)); - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - XContentBuilder builder = jsonBuilder().startObject().field("file").startObject(); - - if (base64text != null) { - // If base64 is provided - builder.field("_content", base64text); - } else { - // A file is provided - byte[] bytes = copyToBytes(PathUtils.get(url)); - builder.field("_content", bytes); - } - - if (size >= 0) { - builder.field("_indexed_chars", size); - } - - BytesReference json = builder.endObject().endObject().bytes(); - - ParseContext.Document doc = docMapper.parse("person", "person", "1", json).rootDoc(); - - terminal.println("## Extracted text"); - terminal.println("--------------------- BEGIN -----------------------"); - terminal.println(doc.get("file.content")); - terminal.println("---------------------- END ------------------------"); - terminal.println("## Metadata"); - printMetadataContent(doc, AttachmentMapper.FieldNames.AUTHOR); - printMetadataContent(doc, AttachmentMapper.FieldNames.CONTENT_LENGTH); - printMetadataContent(doc, AttachmentMapper.FieldNames.CONTENT_TYPE); - printMetadataContent(doc, AttachmentMapper.FieldNames.DATE); - printMetadataContent(doc, AttachmentMapper.FieldNames.KEYWORDS); - printMetadataContent(doc, AttachmentMapper.FieldNames.LANGUAGE); - printMetadataContent(doc, AttachmentMapper.FieldNames.NAME); - printMetadataContent(doc, AttachmentMapper.FieldNames.TITLE); - - return ExitStatus.OK; - } - - private void printMetadataContent(ParseContext.Document doc, String field) { - terminal.println("- " + field + ":" + doc.get(docMapper.mappers().getMapper("file." 
+ field).fieldType().name())); - } - - public static byte[] copyToBytes(Path path) throws IOException { - try (InputStream is = Files.newInputStream(path); - BytesStreamOutput out = new BytesStreamOutput()) { - copy(is, out); - return out.bytes().toBytes(); - } - } - - public static Command parse(Terminal terminal, CommandLine cli) throws IOException { - String url = cli.getOptionValue("u"); - String base64text = null; - String sSize = cli.getOptionValue("size"); - Integer size = sSize != null ? Integer.parseInt(sSize) : -1; - if (url == null && cli.getArgs().length == 0) { - return exitCmd(ExitStatus.USAGE, terminal, "url or BASE64 content should be provided (type -h for help)"); - } - if (url == null) { - if (cli.getArgs().length == 0) { - return exitCmd(ExitStatus.USAGE, terminal, "url or BASE64 content should be provided (type -h for help)"); - } - base64text = cli.getArgs()[0]; - } else { - if (cli.getArgs().length == 1) { - return exitCmd(ExitStatus.USAGE, terminal, "url or BASE64 content should be provided. Not both. (type -h for help)"); - } - } - return new TikaRunner(terminal, url, size, base64text); - } - } - - public StandaloneRunner() { - super(CONFIG); - } - - - public static void main(String[] args) throws Exception { - StandaloneRunner pluginManager = new StandaloneRunner(); - pluginManager.execute(args); - } - - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - switch (cmdName.toLowerCase(Locale.ROOT)) { - case TikaRunner.NAME: return TikaRunner.parse(terminal, cli); - default: - assert false : "can't get here as cmd name is validated before this method is called"; - return exitCmd(ExitStatus.CODE_ERROR); - } - } -} diff --git a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/50_files_supported.yaml b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/50_files_supported.yaml new file mode 100644 index 00000000000..69991b9d0c0 --- /dev/null +++ b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/50_files_supported.yaml @@ -0,0 +1,78 @@ +setup: + - do: + indices.create: + index: test + body: + mappings: + test: + properties: + file: + type: attachment + fields: + content: + store: true + author: + store: true + date: + store: true + content_length: + store: true + content_type: + store: true + + - do: + cluster.health: + wait_for_status: yellow +--- +"Test mapper attachment processor with .doc file": + + - do: + index: + index: test + type: test + id: 1 + refresh: true + body: { file: 
"0M8R4KGxGuEAAAAAAAAAAAAAAAAAAAAAPgADAP7/CQAGAAAAAAAAAAAAAAAEAAAAjAEAAAAAAAAAEAAAjgEAAAEAAAD+////AAAAAIgBAACJAQAAigEAAIsBAAD////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////spcEAg+kMBAAA8BK/AAAAAAABEQABAAEACAAAEwgAAA4AYmpiaoI4gjgAAAAAAAAAAAAAAAAAAAAAAAAMBBYANA4AAOBSAADgUgAAEwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD//w8AAAAAAAAAAAD//w8AAAAAAAAAAAD//w8AAAAAAAAAAAAAAAAAAAAAALcAAAAAAFAHAAAAAAAAUAcAAMcUAAAAAAAAxxQAAAAAAADHFAAAAAAAAMcUAAAAAAAAxxQAABQAAAAAAAAAAAAAAP////8AAAAA2xQAAAAAAADbFAAAAAAAANsUAAAAAAAA2xQAAAwAAADnFAAADAAAANsUAAAAAAAA3hUAADABAADzFAAAAAAAAPMUAAAAAAAA8xQAAAAAAADzFAAAAAAAAPMUAAAAAAAA8xQAAAAAAADzFAAAAAAAAPMUAAAAAAAAVRUAAAIAAABXFQAAAAAAAFcVAAAAAAAAVxUAAAAAAABXFQAAAAAAAFcVAAAAAAAAVxUAACwAAAAOFwAAtgIAAMQZAABaAAAAgxUAABUAAAAAAAAAAAAAAAAAAAAAAAAAxxQAAAAAAADzFAAAAAAAAAAAAAAAAAAAAAAAAAAAAADzFAAAAAAAAPMUAAAAAAAA8xQAAAAAAADzFAAAAAAAAIMVAAAAAAAAGRUAAAAAAADHFAAAAAAAAMcUAAAAAAAA8xQAAAAAAAAAAAAAAAAAAPMUAAAAAAAAmBUAABYAAAAZFQAAAAAAABkVAAAAAAAAGRUAAAAAAADzFAAAFgAAAMcUAAAAAAAA8xQAAAAAAADHFAAAAAAAAPMUAAAAAAAAVRUAAAAAAAAAAAAAAAAAABkVAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA8xQAAAAAAABVFQAAAAAAAAAAAAAAAAAAGRUAAAAAAAAAAAAAAAAAABkVAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGRUAAAAAAAAAAAAAAAAAAP////8AAAAAgI6XYKZ60QEAAAAAAAAAAP////8AAAAACRUAABAAAAAZFQAAAAAAAAAAAAAAAAAAQRUAABQAAACuFQAAMAAAAN4VAAAAAAAAGRUAAAAAAAAeGgAAAAAAABkVAAAAAAAAHhoAAAAAAAAZFQAAAAAAABkVAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADHFAAAAAAAABkVAAAoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA8xQAAAAAAADzFAAAAAAAAPMUAAAAAAAAgxUAAAAAAACDFQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGRUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPMUAAAAAAAA8xQAAAAAAADzFAAAAAAAAN4VAAAAAAAA8xQAAAAAAADzFAAAAAAAAPMUAAAAAAAA8xQAAAAAAAAAAAAAAAAAAP////8AAAAA/////wAAAAD/////AAAAAAAAAAAAAAAA/////wAAAAD/////AAAAAP////8AAAAA/////wAAAAD/////AAAAAP////8AAAAA/////wAAAAD/////AAAAAP////8AAAAA/////wAAAAD/////AAAAAP////8AAAAA/////wAAAAD/////AAAAAB4aAAAAAAAA8xQAAAAAAADzFAAAAAAAAPMUAAAAAAAA8xQAAAAAAADzFAAAAAAAAPMUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADzFAAAAAAAAPMUAAAAAAAA8xQAAAAAAABQBwAAPQwAAI0TAAA6AQAABwAMAQ8ADQEAAAwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFRlc3QgZWxhc3RpY3NlYXJjaA0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAABIIAAATCAAA/PgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYWaJVGuQAABhZo3wiGAAIACAAAEwgAAP0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAATIAMZBoATpwpBeqAB+wfC4gsMhBIbCJBSKwiQUjkIkFJJCJBSWwAAAXsMQCGLDEAgyQxAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALgYPABIAAQB8AQ8ACAADAAMAAwAAAAQACAAAAJgAAACeAAAAngAAAJ4AAACeAAAAngAAAJ4AAACeAAAAngAAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAAHYCAAB2AgAAdgIAAHYCAAB2AgAAdgIAAHYCAAB2AgAAdgIAADYGAAA2BgAANgYAADYGAAA2BgAANgYAAD4CAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAACoAAAANgYAADYGAAAWAAAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAC4AAAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAAaAEAAEgBAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAAHACAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAANgYAADYGAAA2BgAAMgYAABgAAADGAwAA1gMAAOYDAAD2AwAABgQAABYEAAAmBAAANgQAA
EYEAABWBAAAZgQAAHYEAACGBAAAlgQAAMYDAADWAwAA5gMAAPYDAAAGBAAAFgQAADIGAAAoAgAA2AEAAOgBAAAmBAAANgQAAEYEAABWBAAAZgQAAHYEAACGBAAAlgQAAMYDAADWAwAA5gMAAPYDAAAGBAAAFgQAACYEAAA2BAAARgQAAFYEAABmBAAAdgQAAIYEAACWBAAAxgMAANYDAADmAwAA9gMAAAYEAAAWBAAAJgQAADYEAABGBAAAVgQAAGYEAAB2BAAAhgQAAJYEAADGAwAA1gMAAOYDAAD2AwAABgQAABYEAAAmBAAANgQAAEYEAABWBAAAZgQAAHYEAACGBAAAlgQAAMYDAADWAwAA5gMAAPYDAAAGBAAAFgQAACYEAAA2BAAARgQAAFYEAABmBAAAdgQAAIYEAACWBAAAxgMAANYDAADmAwAA9gMAAAYEAAAWBAAAJgQAADYEAABGBAAAVgQAAGYEAAB2BAAAhgQAAJYEAAA4AQAAWAEAAPgBAAAIAgAAGAIAAFYCAAB+AgAAkAIAAKACAACwAgAAwAIAANACAACAAgAA4AIAAPACAAAAAwAAEAMAACADAAAwAwAAQAMAAOACAADwAgAAAAMAABADAAAgAwAAMAMAAEADAADgAgAA8AIAAAADAAAQAwAAIAMAADADAABAAwAA4AIAAPACAAAAAwAAEAMAACADAAAwAwAAQAMAAOACAADwAgAAAAMAABADAAAgAwAAMAMAAEADAADgAgAA8AIAAAADAAAQAwAAIAMAADADAABAAwAA4AIAAPACAAAAAwAAEAMAACADAAAwAwAAQAMAAOACAADwAgAAAAMAABADAAAgAwAAMAMAAEADAADgAgAA8AIAAAADAAAQAwAAIAMAADADAABAAwAA4AIAAPACAAAAAwAAEAMAACADAAAwAwAAQAMAAOACAADwAgAAAAMAABADAAAgAwAAMAMAAEADAADgAgAA8AIAAAADAAAQAwAAIAMAADADAABAAwAA4AIAAPACAAAAAwAAEAMAACADAAAwAwAAQAMAAOACAADwAgAAAAMAABADAAAgAwAAMAMAAEADAAAgAAAAT0oDAFBKAwBRSgMAX0gBBG1IDARuSAwEc0gMBHRIDAQAAAAAQAAAYPH/AgBAAAwQAAAAAAAAAAAGAE4AbwByAG0AYQBsAAAAAgAAABgAQ0oYAF9IAQRhShgAbUgMBHNIDAR0SAkEAAAAAAAAAAAAAAAAAAAAAAAAOgBBIPL/oQA6AAwNAAAAAAAAEAARAFAAbwBsAGkAYwBlACAAcABhAHIAIABkAOkAZgBhAHUAdAAAAAAAVgBpAPP/swBWAAwNAAAAAAAAMAYOAFQAYQBiAGwAZQBhAHUAIABOAG8AcgBtAGEAbAAAABwAF/YDAAA01gYAAQoDbAA01gYAAQUDAABh9gMAAAIACwAAADIAayD0/8EAMgAADQAAAAAAADAGDABBAHUAYwB1AG4AZQAgAGwAaQBzAHQAZQAAAAIADAAAAAAAUEsDBBQABgAIAAAAIQCb6HBP/AAAABwCAAATAAAAW0NvbnRlbnRfVHlwZXNdLnhtbKyRy2rDMBBF94X+g9C22HK6KKXYzqKPXR+L9AMGeWyL2CMhTULy9x07LpQSAoVuBNLMvffMqFwfxkHtMSbnqdKrvNAKyfrGUVfpz81Ldq9VYqAGBk9Y6SMmva6vr8rNMWBSoqZU6Z45PBiTbI8jpNwHJKm0Po7Aco2dCWC30KG5LYo7Yz0xEmc8eei6fMIWdgOr54M8n0hErtXjqW+KqjSEMDgLLKBmqpqzuohDuiDcU/OLLlvIclHO5ql3Id0sCe+ymugaVB8Q+Q1G4TAsQ+LP8xVIRov5ZeYz0b5tncXG290o68hn48XsTwCr/4n+zjTz39ZfAAAA//8DAFBLAwQUAAYACAAAACEApdan58AAAAA2AQAACwAAAF9yZWxzLy5yZWxzhI/PasMwDIfvhb2D0X1R0sMYJXYvpZBDL6N9AOEof2giG9sb69tPxwYKuwiEpO/3qT3+rov54ZTnIBaaqgbD4kM/y2jhdj2/f4LJhaSnJQhbeHCGo3vbtV+8UNGjPM0xG6VItjCVEg+I2U+8Uq5CZNHJENJKRds0YiR/p5FxX9cfmJ4Z4DZM0/UWUtc3YK6PqMn/s8MwzJ5PwX+vLOVFBG43lExp5GKhqC/jU72QqGWq1B7Qtbj51v0BAAD//wMAUEsDBBQABgAIAAAAIQBreZYWgwAAAIoAAAAcAAAAdGhlbWUvdGhlbWUvdGhlbWVNYW5hZ2VyLnhtbAzMTQrDIBBA4X2hd5DZN2O7KEVissuuu/YAQ5waQceg0p/b1+XjgzfO3xTVm0sNWSycBw2KZc0uiLfwfCynG6jaSBzFLGzhxxXm6XgYybSNE99JyHNRfSPVkIWttd0g1rUr1SHvLN1euSRqPYtHV+jT9yniResrJgoCOP0BAAD//wMAUEsDBBQABgAIAAAAIQBtTVmryAYAAI4aAAAWAAAAdGhlbWUvdGhlbWUvdGhlbWUxLnhtbOxZ3YrbRhS+L/QdhO4d/0n+WeINtmxv2uwmIXbS5nJWHkuTHWmMZrwbEwJ9gkIhLb0p9K6F3gTaN+i7pLTpQ/TMSJZn7HH2hy2E0jUs8vg7Z7455+g7I83dey8T6pzjjBOW9tz6nZrr4DRkM5JGPffpdFzpuA4XKJ0hylLcc1eYu/cOP/3kLjoQMU6wA/YpP0A9NxZicVCt8hCGEb/DFjiF3+YsS5CAr1lUnWXoAvwmtNqo1VrVBJHUdVKUgNtp/PvP4OzRfE5C7B6uvY8oTJEKLgdCmk2kb1yYDJYZRkuFnZ3VJYKveEAz5xzRngsTzdjFFL8UrkMRF/BDz62pP7d6eLeKDgojKvbYanZj9VfYFQazs4aaM4tOy0k9z/da/dK/AlCxixu1R61Rq/SnACgMYaU5F92nP+gOhn6B1UD5pcX3sD1s1g285r+5w7nvy4+BV6Dcv7eDH48DiKKBV6Ac7+/gPa/dCDwDr0A5vrWDb9f6Q69t4BUopiQ920HX/FYzWK+2hMwZvW+Fd31v3G4UzjcoqIayuuQUc5aKfbWWoBcsGwNAAikSJHXEaoHnKIQyDhAlpxlxjkkUQ+EtUMo4DNcatXGtCf/lx1NXKiLoACPNWvICJnxnSPJxeJiRhei5n4NXV4M8XzpHTMQkLGZVTgyL+yiNdIv3P33z9w9fOX/9+uP7N9/mk27juY4f4jT6kqD0QxPAajdhePfd2z9+e/vu+6///OWNxX8/Q6c6fEoSzJ2H+MJ5whJYnGUF+DS7nsU0RkS36KcRRymSs1j8jyB+OvrhClFkwQ0gEjruWQYyYwMeLV8YhCdxthTE4vFBnBjAE8bogGXWKDyQc2lhni7TyD55ttRxTxA6t80doNTI82i5AH0lNpdBjA2ajylKBYpwioUjf2NnGFtW95wQI64nJMwYZ3PhPCfOABFrSKbk1KimjdF9kkBeVjaCkG8jNifPnAGjtlUP8bmJhLsDUQv5KaZG
GI/QUqDE5nKKEqoH/BiJ2EZysspCHTfiAjIdYcqc0QxzbrN5lMF6taQ/AImxp/2ErhITmQlyZvN5jBjTkUN2FsQoWdiwE5LGOvYzfgYlipzHTNjgJ8y8Q+R3yAOIx750PyPYSPflavAU1FWntCkQ+csys+TyCDOjficrOkdYSQ2Iv6HpCUkvFfgtaff/PWk/IWkYM8uKbkvU7a6NjFxTzvsZsd5P97dEfB9uW7oDls3Ix6/cQ7RMH2O4WXbb1//C/b9wu/954d53P9++XG8UGsRbbl3zzbrauid7d+5zQulErCg+5mrzzqEvzcYwKO3UYysun+QWMVzKOxkmMHBRhpSNkzHxBRHxJEYL2OHXXekk4oXriDsLxmHjr4atviWeLpMTNssfWOt1+XCaiwdHYjNe88txeNgQObrV3jyEle4V20g9LK8JSNvrkNAmM0k0LSTa60EZJPVoDkGzkFAruxUWXQuLjnS/TtUOC6BWZgU2Tg5st3qu74EJGMEzFaJ4JvOUp3qdXZXM28z0vmAaFQC7iHUFbDLdlVz3Lk+uLi+1K2TaIKGVm0lCRUb1MB6jGS6qU45ehcZ1c93dpNSgJ0Oh5oPS2tBodz7E4qa5BrttbaCprhQ0dS56bqvpQ8mEaNFz5/DgD5fJAmqHyw0vohG8PgtFlt/wN1GWRcbFEPE4D7gSnVwNEiJw5lCS9Fy5/DINNFUaorjVGyAIHy25LsjKx0YOkm4mGc/nOBR62rURGen8Kyh8rhXWX5X5zcHSki0h3ZN4duGc0mX2BEGJ+e26DOCMcHj/U8+jOSPwQrMUsk39bTWmQnb1N4qqhvJxRBcxKjqKLuY5XEl5SUd9K2OgfSvWDAHVQlI0wtNINlg9qEY3LbtGzmFv173cSEZOE81NzzRURXZNu4oZM6zbwFYsb9bkNVbrEIOm6R0+l+5tye2utW5rn1B2CQh4GT9L171CQ9CobSYzqEnGuzIsNbsYNXvHeoGXULtKk9BUv7V2uxW3skdYp4PBG3V+sNuuWhiar/eVKtLq6EM/nGCnL0A8hvAaeEkFV6mEo4cMwYZoovYkuWzALfJSFLcGXDnLjPTcVzW/7wUNP6jUOv6o4jW9WqXj95uVvu836yO/XhsOGq+hsYg4qfv5scsYXkTRVXH4osZ3DmCS9bu2OyFLqkydrFQVcXUAU28YBzD5yYszlQcsrkNAdF61GuNusztoVbrN/rjiDQedSjdoDSrDVtAejoeB3+mOX7vOuQJ7/WbgtUadSqseBBWvVZP0O91K22s0+l673xl5/dfFNgZWnstHEQsIr+J1+A8AAAD//wMAUEsDBBQABgAIAAAAIQAN0ZCftgAAABsBAAAnAAAAdGhlbWUvdGhlbWUvX3JlbHMvdGhlbWVNYW5hZ2VyLnhtbC5yZWxzhI9NCsIwFIT3gncIb2/TuhCRJt2I0K3UA4TkNQ02PyRR7O0NriwILodhvplpu5edyRNjMt4xaKoaCDrplXGawW247I5AUhZOidk7ZLBggo5vN+0VZ5FLKE0mJFIoLjGYcg4nSpOc0IpU+YCuOKOPVuQio6ZByLvQSPd1faDxmwF8xSS9YhB71QAZllCa/7P9OBqJZy8fFl3+UUFz2YUFKKLGzOAjm6pMBMpburrE3wAAAP//AwBQSwECLQAUAAYACAAAACEAm+hwT/wAAAAcAgAAEwAAAAAAAAAAAAAAAAAAAAAAW0NvbnRlbnRfVHlwZXNdLnhtbFBLAQItABQABgAIAAAAIQCl1qfnwAAAADYBAAALAAAAAAAAAAAAAAAAAC0BAABfcmVscy8ucmVsc1BLAQItABQABgAIAAAAIQBreZYWgwAAAIoAAAAcAAAAAAAAAAAAAAAAABYCAAB0aGVtZS90aGVtZS90aGVtZU1hbmFnZXIueG1sUEsBAi0AFAAGAAgAAAAhAG1NWavIBgAAjhoAABYAAAAAAAAAAAAAAAAA0wIAAHRoZW1lL3RoZW1lL3RoZW1lMS54bWxQSwECLQAUAAYACAAAACEADdGQn7YAAAAbAQAAJwAAAAAAAAAAAAAAAADPCQAAdGhlbWUvdGhlbWUvX3JlbHMvdGhlbWVNYW5hZ2VyLnhtbC5yZWxzUEsFBgAAAAAFAAUAXQEAAMoKAAAAADw/eG1sIHZlcnNpb249IjEuMCIgZW5jb2Rpbmc9IlVURi04IiBzdGFuZGFsb25lPSJ5ZXMiPz4NCjxhOmNsck1hcCB4bWxuczphPSJodHRwOi8vc2NoZW1hcy5vcGVueG1sZm9ybWF0cy5vcmcvZHJhd2luZ21sLzIwMDYvbWFpbiIgYmcxPSJsdDEiIHR4MT0iZGsxIiBiZzI9Imx0MiIgdHgyPSJkazIiIGFjY2VudDE9ImFjY2VudDEiIGFjY2VudDI9ImFjY2VudDIiIGFjY2VudDM9ImFjY2VudDMiIGFjY2VudDQ9ImFjY2VudDQiIGFjY2VudDU9ImFjY2VudDUiIGFjY2VudDY9ImFjY2VudDYiIGhsaW5rPSJobGluayIgZm9sSGxpbms9ImZvbEhsaW5rIi8+AAAAABMAAAAUAAAOAAAIAP////8ACAAAEwgAAAUAAAAACAAAEwgAAAYAAAAAAAAABQAAABIAAAAVAAAABwAEAAcAAAAAABIAAAAVAAAABAAHAAQAAAAEAAAACAAAAOUAAAAAAAAAAwAAAN8IhgCkF6oAlUa5AH419AAAAAAAEwAAABUAAAAAAAAAAQAAAP9AAIABABIAAAASAAAAAEBDewEAAQASAAAAAAAAABIAAAAAAAAAAAAAAAAAAAACEAAAAAAAAAATAAAAoAAAEABAAAD//wEAAAAHAFUAbgBrAG4AbwB3AG4A//8BAAgAAAAAAAAAAAAAAP//AQAAAAAA//8AAAIA//8AAAAA//8AAAIA//8AAAAABQAAAEcOkAEAAAICBgMFBAUCAwTvKgDgQXgAwAkAAAAAAAAA/wEAAAAAAABUAGkAbQBlAHMAIABOAGUAdwAgAFIAbwBtAGEAbgAAADUOkAECAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAgAAAAABTAHkAbQBiAG8AbAAAADMOkAEAAAILBgQCAgICAgT/KgDgQ3gAwAkAAAAAAAAA/wEAAAAAAABBAHIAaQBhAGwAAAA3DpABAAACDwUCAgIEAwIE/wIA4P+sAEABAAAAAAAAAJ8BAAAAAAAAQwBhAGwAaQBiAHIAaQAAAEESkAEBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABDAGEAbQBiAHIAaQBhACAATQBhAHQAaAAAACAABADxCIgIAPDEAgAAqQEAAAAAWVJDh1lSQ4cAAAAAAgABAAAAAgAAABEAAAABAAEAAAAEAAOQAQAAAAIAAAARAAAAAQABAAAAAQAAAAAAAAAhAwDwEAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAClBsAHtAC0AIGBcjAAAAAAAAAAAAAAAAAAABIAAAASAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAABAAAAA8BAACAD8/QEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACSFAAAAAACfH/DwAAJFAAABAnAAD///9/////f////3////9/////f////3////9/3wiGAAAEAAAyAAAAAAAAAAAAAAAAAAAAAAAAAAAAIQQAAAAAAAAAAAAAAAAAAAAAAAAQHAAABAAAAAAAAAAAAHgAAAB4AAAAAAAAAAAAAACgBQAAGkjOCAsAAAAAAAAA3AAAAAEAAAD//xIAAAAAAAAAAAAAAAAAAAAMAEQAYQB2AGkAZAAgAFAAaQBsAGEAdABvAAwARABhAHYAaQBkACAAUABpAGwAYQB0AG8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP7/AAADCgEAAAAAAAAAAAAAAAAAAAAAAAEAAADghZ/y+U9oEKuRCAArJ7PZMAAAANzSAgASAAAAAQAAAJgAAAACAAAAoAAAAAMAAACsAAAABAAAALgAAAAFAAAA0AAAAAYAAADcAAAABwAAAOgAAAAIAAAA/AAAAAkAAAAUAQAAEgAAACABAAAKAAAARAEAAAwAAABQAQAADQAAAFwBAAAOAAAAaAEAAA8AAABwAQAAEAAAAHgBAAATAAAAgAEAABEAAACIAQAAAgAAABAnAAAeAAAABAAAAAAAAAAeAAAABAAAAAAAAAAeAAAAEAAAAERhdmlkIFBpbGF0bwAAAAAeAAAABAAAAAAAAAAeAAAABAAAAAAAAAAeAAAADAAAAE5vcm1hbC5kb3RtAB4AAAAQAAAARGF2aWQgUGlsYXRvAAAAAB4AAAAEAAAAMgAAAB4AAAAcAAAATWljcm9zb2Z0IE1hY2ludG9zaCBXb3JkAAAAAEAAAAAARsMjAAAAAEAAAAAAFjZWpnrRAUAAAAAAFjZWpnrRAQMAAAABAAAAAwAAAAIAAAADAAAAEQAAAAMAAAAAAAAARwAAAEzRAgD/////DgAAAAEAAABsAAAAAAAAAAAAAAD/AAAAswAAAAAAAAAAAAAAZhkAANsRAAAgRU1GAAABAETRAgAIAAAAAQAAAAAAAAAAAAAAAAAAAOwEAACxAwAAQAEAAPAAAAAAAAAAAAAAAAAAAAAA4gQAgKkDABEAAAAMAAAACAAAAAoAAAAQAAAAAAAAAAAAAAAJAAAAEAAAAAABAAC0AAAADAAAABAAAAAAAAAAAAAAAAsAAAAQAAAAAAEAALQAAABRAAAAeNACAAAAAAAAAAAA/wAAALMAAAAAAAAAAAAAAAAAAAAAAAAAAAEAALQAAABQAAAAKAAAAHgAAAAA0AIAAAAAACAAzAAAAQAAtAAAACgAAAAAAQAAtAAAAAEAIAAAAAAAANACAAAAAAAAAAAAAAAAAAAAAAD/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////vr6+/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/76+vv////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7//////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O//////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////7vf//+rz7v/Yzc3/0NLY/+DX2f/N4PL/3tXI/8jV4v/Q0cX/1tDI/9ve2f/U0tX/0NLQ/83I0P/I2N7/4tnI/9LZ4v/v6tz/5eXl////9////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////83g9//e3M3/vrG3/8TCxv/Xwrz/vdfu/8W/rv/K1tX/x8bB/8LJxv/Oxb7/yMTE/8vCwv+3scH/zd7Z/9DNyP/BwcT/z97X/82xq/////v////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////u9/v/+/Lu////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O///////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////zs7O/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////87Ozv/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////Ozs7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////++vr7/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/5OTk/+Tk5P/k5OT/vr6+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////8OAAAAFAAAAAAAAAAQAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD+/wAAAwoBAAAAAAAAAAAAAAAAAAAAAAABAAAAAtXN1ZwuGxCTlwgAKyz5rjAAAADUAAAACwAAAAEAAABgAAAABQAAAGgAAAAGAAAAcAAAABEAAAB4AAAAFwAAAIAAAAALAAAAiAAAABAAAACQAAAAEwAAAJgAAAAWAAAAoAAAAA0AAACoAAAADAAAALUAAAACAAAAECcAAAMAAAABAAAAAwAAAAEAAAADAAAAEgAAAAMAAAAAA
A8ACwAAAAAAAAALAAAAAAAAAAsAAAAAAAAACwAAAAAAAAAeEAAAAQAAAAEAAAAADBAAAAIAAAAeAAAABgAAAFRpdHJlAAMAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABAAAAAUAAAAGAAAABwAAAP7///8JAAAACgAAAAsAAAAMAAAADQAAAA4AAAAPAAAAEAAAABEAAAASAAAAEwAAABQAAAAVAAAA/v///xcAAAAYAAAAGQAAABoAAAAbAAAAHAAAAB0AAAAeAAAAHwAAACAAAAAhAAAAIgAAACMAAAAkAAAAJQAAACYAAAAnAAAAKAAAACkAAAAqAAAAKwAAACwAAAAtAAAALgAAAC8AAAAwAAAAMQAAADIAAAAzAAAANAAAADUAAAA2AAAANwAAADgAAAA5AAAAOgAAADsAAAA8AAAAPQAAAD4AAAA/AAAAQAAAAEEAAABCAAAAQwAAAEQAAABFAAAARgAAAEcAAABIAAAASQAAAEoAAABLAAAATAAAAE0AAABOAAAATwAAAFAAAABRAAAAUgAAAFMAAABUAAAAVQAAAFYAAABXAAAAWAAAAFkAAABaAAAAWwAAAFwAAABdAAAAXgAAAF8AAABgAAAAYQAAAGIAAABjAAAAZAAAAGUAAABmAAAAZwAAAGgAAABpAAAAagAAAGsAAABsAAAAbQAAAG4AAABvAAAAcAAAAHEAAAByAAAAcwAAAHQAAAB1AAAAdgAAAHcAAAB4AAAAeQAAAHoAAAB7AAAAfAAAAH0AAAB+AAAAfwAAAIAAAACBAAAAggAAAIMAAACEAAAAhQAAAIYAAACHAAAAiAAAAIkAAACKAAAAiwAAAIwAAACNAAAAjgAAAI8AAACQAAAAkQAAAJIAAACTAAAAlAAAAJUAAACWAAAAlwAAAJgAAACZAAAAmgAAAJsAAACcAAAAnQAAAJ4AAACfAAAAoAAAAKEAAACiAAAAowAAAKQAAAClAAAApgAAAKcAAACoAAAAqQAAAKoAAACrAAAArAAAAK0AAACuAAAArwAAALAAAACxAAAAsgAAALMAAAC0AAAAtQAAALYAAAC3AAAAuAAAALkAAAC6AAAAuwAAALwAAAC9AAAAvgAAAL8AAADAAAAAwQAAAMIAAADDAAAAxAAAAMUAAADGAAAAxwAAAMgAAADJAAAAygAAAMsAAADMAAAAzQAAAM4AAADPAAAA0AAAANEAAADSAAAA0wAAANQAAADVAAAA1gAAANcAAADYAAAA2QAAANoAAADbAAAA3AAAAN0AAADeAAAA3wAAAOAAAADhAAAA4gAAAOMAAADkAAAA5QAAAOYAAADnAAAA6AAAAOkAAADqAAAA6wAAAOwAAADtAAAA7gAAAO8AAADwAAAA8QAAAPIAAADzAAAA9AAAAPUAAAD2AAAA9wAAAPgAAAD5AAAA+gAAAPsAAAD8AAAA/QAAAP4AAAD/AAAAAAEAAAEBAAACAQAAAwEAAAQBAAAFAQAABgEAAAcBAAAIAQAACQEAAAoBAAALAQAADAEAAA0BAAAOAQAADwEAABABAAARAQAAEgEAABMBAAAUAQAAFQEAABYBAAAXAQAAGAEAABkBAAAaAQAAGwEAABwBAAAdAQAAHgEAAB8BAAAgAQAAIQEAACIBAAAjAQAAJAEAACUBAAAmAQAAJwEAACgBAAApAQAAKgEAACsBAAAsAQAALQEAAC4BAAAvAQAAMAEAADEBAAAyAQAAMwEAADQBAAA1AQAANgEAADcBAAA4AQAAOQEAADoBAAA7AQAAPAEAAD0BAAA+AQAAPwEAAEABAABBAQAAQgEAAEMBAABEAQAARQEAAEYBAABHAQAASAEAAEkBAABKAQAASwEAAEwBAABNAQAATgEAAE8BAABQAQAAUQEAAFIBAABTAQAAVAEAAFUBAABWAQAAVwEAAFgBAABZAQAAWgEAAFsBAABcAQAAXQEAAF4BAABfAQAAYAEAAGEBAABiAQAAYwE
AAGQBAABlAQAAZgEAAGcBAABoAQAAaQEAAGoBAABrAQAAbAEAAG0BAABuAQAAbwEAAHABAABxAQAAcgEAAHMBAAB0AQAAdQEAAHYBAAB3AQAAeAEAAHkBAAB6AQAAewEAAHwBAAB9AQAAfgEAAH8BAAD+////gQEAAIIBAACDAQAAhAEAAIUBAACGAQAAhwEAAP7////9/////f////3////9////jQEAAP7////+/////v////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////9SAG8AbwB0ACAARQBuAHQAcgB5AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFgAFAf//////////AwAAAAYJAgAAAAAAwAAAAAAAAEYAAAAAAAAAAAAAAAAgFZlgpnrRAY8BAACAAAAAAAAAADEAVABhAGIAbABlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAIB/////wUAAAD/////AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAB4aAAAAAAAAVwBvAHIAZABEAG8AYwB1AG0AZQBuAHQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABoAAgEBAAAA//////////8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAFAFMAdQBtAG0AYQByAHkASQBuAGYAbwByAG0AYQB0AGkAbwBuAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKAACAQIAAAAEAAAA/////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABYAAAAM0wIAAAAAAAUARABvAGMAdQBtAGUAbgB0AFMAdQBtAG0AYQByAHkASQBuAGYAbwByAG0AYQB0AGkAbwBuAAAAAAAAAAAAAAA4AAIB////////////////AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAEAAAAQAAAAAAAAAQBDAG8AbQBwAE8AYgBqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABIAAgD///////////////8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP///////////////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA////////////////AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAP7///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////8BAP7/AwoAAP////8GCQIAAAAAAMAAAAAAAABGIAAAAERvY3VtZW50IE1pY3Jvc29mdCBXb3JkIDk3LTIwMDQACgAAAE1TV29yZERvYwAQAAAAV29yZC5Eb2N1bWVudC44APQ5snEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAA==" } + + - do: + search: + index: test + body: + fields: [file.content, file.author, file.date, file.content_length, file.content_type] + - match: { hits.total: 1 } + - match: { hits.hits.0.fields: { + file.content: ["Test elasticsearch\n"], + file.author: ["David Pilato"], + file.date: ["2016-03-10T08:25:00Z"], + file.content_length: ["205312"], + file.content_type: ["application/msword"] + } + } + + +--- +"Test mapper attachment processor with .docx file": + + - do: + index: + index: test + type: test + id: 1 + refresh: true + body: { file: "UEsDBBQABgAIAAAAIQBtiidLZgEAAFQFAAATAAgCW0NvbnRlbnRfVHlwZXNdLnhtbCCiBAIooAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC0lMtugzAQRfeV+g/I2wqcdFFVVUgWfSzbSE0/wLEH4tYv2c7r7ztAgqooAalJNkgwc+89A3hGk41WyQp8kNbkZJgNSAKGWyFNmZOv2Vv6SJIQmRFMWQM52UIgk/HtzWi2dRASVJuQk0WM7onSwBegWcisA4OVwnrNIt76kjrGf1gJ9H4weKDcmggmprHyIOPRCxRsqWLyusHHDQnKSfLc9FVROWHOKclZxDKtqvSozoMKHcKVEQd06Y4sQ2XdExbShbvTCd8OyoMEqavR6gJqPvB1eikgmTIf35nGBrq2XlBh+VKjKOse7gijLQrJodVXbs5bDiHgd9IqayuaSbNnP8kR4lZBuDxF49sfDzGi4BoAO+dehDXMP69G8ce8F6TA3BmbK7g8RmvdCxHx1EJzHZ7NUdt0RWLn1FsXcAv4f4y9P66VOsWBHfgou/+6NhGtz54Pqk0gQBzJpvVOHP8CAAD//wMAUEsDBBQABgAIAAAAIQDHwie8/wAAAN8CAAALAAgCX3JlbHMvLnJlbHMgogQCKKAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAArJLNSgMxEIDvgu8Q5t7NtoqINNuLCL2JrA8wJtPd6OaHZKrt2xtF1IVlEexx/j6+SWa9ObhBvFLKNngFy6oGQV4HY32n4LG9W1yDyIze4BA8KThShk1zfrZ+oAG5DOXexiwKxWcFPXO8kTLrnhzmKkTypbILySGXMHUyon7BjuSqrq9k+s2AZsQUW6Mgbc0FiPYY6X9s6YjRIKPUIdEipjKd2JZdRIupI1Zggr4v6fzZURUyyGmhy78Lhd3OaroNeu/I85QXHZi8ITOvhDHOGS1PaTTu+JF5C8lI85Wes1md9sO437snj3aYeJfvWvUcqfsQkqOzbN4BAAD//wMAUEsDBBQABgAIAAAAIQATqj6H9gAAADEDAAAcAAgBd29yZC9fcmVscy9kb2N1bWVudC54bWwucmVscyCiBAEooAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKySy2rDMBBF94X+g5h9LTt9UELkbEoh29b9AEUeP6gsCc304b+vaEjr0GC68PJeMfeeQbPZfg5WvGOk3jsFRZaDQGd83btWwUv1eHUPgli7WlvvUMGIBNvy8mLzhFZzGqKuDyRSiiMFHXNYS0mmw0FT5gO69NL4OGhOMrYyaPOqW5SrPL+TcZoB5Umm2NUK4q6+BlGNAf+T7ZumN/jgzduAjs9UyA/cPyNzWo5SrI4t
soKJmaVEkOdBbpYEabzjSu8t/mL8WHMQt0tCcJqdAHzLg1nMMRRLMhCPFiefcdBz9atF6/9cw9E5IsiTQy+/AAAA//8DAFBLAwQUAAYACAAAACEA9WKOYGUCAAAOBwAAEQAAAHdvcmQvZG9jdW1lbnQueG1spFXfb9owEH6ftP8h8jtNwijQiFDR0qI+TKpK9zwZx0ksYp9lGyj763dOIGSbVtGSh9j367vv7mJncvsmq2DLjRWgUhJfRSTgikEmVJGSH6+PvTEJrKMqoxUonpI9t+R2+vXLZJdkwDaSKxcghLLJTrOUlM7pJAwtK7mk9koKZsBC7q4YyBDyXDAe7sBkYT+Ko3qnDTBuLea7p2pLLTnASTgPTVJ23PajaIyyUC3Gv4xAc4XGHIykDkVTYIRZb3QPMTV1YiUq4fYea9jCbFOyMSo5YPRaHj4mQQLJVlZHZ3jPtyF6WI4R5hySTcj80PKaXmh4hYRB2VLoU98+i4bG8gjybsGdYnc6Hlw29LmhO1xOgOfQz5ogWTXM30eMozMm4iHaiHMo/JnzyKT78e0+15pOc+PrjwH0/wbQxWXDWRjY6BOauAztSa1bLH+VfADrMORuafYyMsuSajyBkiVPhQJDVxUywpEF2PXAf9ZkilfcCrK9XzWqB4mmhj5lKRmNhg/X9/GI1FrH31yjbR7UJnidZi8piaK7m8Hw5rpVzXlON5XzlvEwGs8f6yzGv9z0lVsX4JG2TjDLqWHlJPR6/65dVgBrf1ktHTUOIQVmjTy2ohLZ/1zAHWVrEnZ9H1TWeoY1lPZmy5l7Nv9nukS7185m8WjW9EIXy19oxdMRxzdRnbfE/XA8qJG9w3fqIR3gIY4HdX8SI4rSncQVOAfyJFc871hLTjOO1+EoGnsxB3Adsdi4WjykY1BZ1FpNGW98ajX+lRZG+KIrofizcAxZfhseq28Kr7fNcMPTj2z6GwAA//8DAFBLAwQUAAYACAAAACEAbU1ZqyEGAACOGgAAFQAAAHdvcmQvdGhlbWUvdGhlbWUxLnhtbOxZy47bNhTdF+g/ENo7lm3Jj0E8gS3bSZuZJMg4abOkJVpihhINkpoZIwjQLyhQIC26KdBdC3QToP2D/kuKNv2IUpRlkzbdQToOEBSxAYuPcy8P7yUPJev2nauUgAvEOKZZ32ncch2AspBGOIv7zpPppNZ1ABcwiyChGeo7S8SdO8effnIbHokEpQhI+4wfwb6TCLE4qtd5KJshv0UXKJN9c8pSKGSVxfWIwUvpNyX1puu26ynEmQMymEq30+T3n6Wzh/M5DpFzXHkfE/mTCV40hISdFb7RymSYMwRzhY3OG8WFL3lAGLiApO/IgSJ6OUVXwgEEciE7+o6rPk79+HZ9bUTEHlvNbqI+K7uVQXTeVHYsnq0NPc/32oO1fwUgYhc37ozb4/banwLAMJQzLbnoWH/YG478FVYDlUWL71Fn1GoYeM1/awc/8IuvgVegsujt4CeTYBNDDVQWfUtMOs3AM/AKVBbbO/iOOxh5HQOvQAnB2fkO2vXbraCa7Royp+SeFd7zvUmnuYJvUHVtdZX2mdi31lL4nLKJBKjkQoEzIJYLNIehxAWQ4BnD4ATHiVx4C5hRLpvdpjtxW/K3+HqqpCICjxDUrMumkO80FXwADxleiL7zufTqaJBnObhLRYLD1ai7FvdgFusWb3/65u8fvgJ//frj21ff2vFcx49QFn+JYfZvAwjd4M13r//47fWb77/+85dXFviAwZkOn+IUcfAAXYLHNJWTswyAZuzdLKYJxLrFIIs5zGBhY0GPZfx09IMlJNCCGyIzkk+ZlAob8G7+3CB8lrBcYAvwfpIawFNKyZAy65zuF2PpUciz2D44y3XcYwgvbGMHW3ke5wu55rHNZZAgg+YjIlMOY5QhAYo+eo6QxewZxkZcT3HIKKdzAZ5hMITYGpIpnhmraWN0D6cyL0sbQZlvIzanT8GQEpv7EbowkXJ3QGJziYgRxrswFzC1MoYp0ZEnUCQ2kmdLFhoB50JmOkaEgnGEOLfZPGRLg+59KTH2tJ+SZWoimcDnNuQJpNTY4PQ8SGC6sHLGWaJjP+PncolC8IgKKwlq7pCiLvMgxWNfup9iZKT7+r39RMqQfYEUPTmzbQlEzf24JHOIlPP6lqanOLtW4Lek3X9/0n6KszChds09iKjboTeR8wHD1v20LeL7cNvSHVAW4Q9fuUcwzx4huVks0I/C/VG4//fCvW8/H16uNwqtbuOrm3XlJt175z7HhJyJJUEnXGk7l9OLJrJRVZTR+kFhkcjiajgDFzOoyoBR8QUWyVkCF3KYhhoh5ivXMQcLyuXpoJqtvosOkqenNCpbG43q2VQaQLFpl6dL1S7PIlG2tjubh7C1e1WL1cNyRaCwfRcS2mAmiZaFRKdqvIaEmtlBWPQsLLqF+70s1GWVFbn/ACz+1/C9kpFcb5CgqMhTaV9l9+CZ3hdMc9pNy/R6BdfDZNogoS03k4S2DBMYoe3mA+e6t0mpQa8IxS6NTvd95LoQkS1tIJlZA5dyz7V86SaEi74zl/eFspgupD9e6CYkcdZ3QrEK9H9RlgXjYgR5UsJUVzn/FAvEAMGpXOt6Gki24dZodoo5fqDkeu6HFzl10ZOM5nMUij0tm6rsK51Ye28ILio0l6TPkugSzEjOHkMZKL/TKAIYYS7W0Yww0xb3JopbcrXaisZ/ZpstCskigasTRRfzEq7KazraPBTT7VmZ9dVkZnGRpBufutcbFR2aaO45QIpT064f7++Q11htdN9gVUr3ttb1Kq3bd0rc/EDQqG0GM6gVjC3UNq0mtQPeEGjDrZfmvjPi0KfB9qotDojqvlLVdl5O0NlzufJH8nY1J4IrquhKPiME1d/KpRKo1kpdrgTIGe47L1x/4AVNP6i5XX9c81qeW+v6g1Zt4PutxthvuKNh86UMikjShl+OPZHPM2S5evmi2ndewKTVbfatkKZ1qt6s1JWxegHTaBovYMo3L2Ba9DsAy8i8aDcnvVZv2K71WoNJzRsNu7Ve0B7WRu2gM5qMAr/bm7x0wIUCe4NW4LXH3Vq7EQQ1r+0W9Lu9WsdrNgdeZ9Ade4OXq1jLmVfXKryK1/E/AAAA//8DAFBLAwQKAAAAAAAAACEAvOgH/fQnAAD0JwAAFwAAAGRvY1Byb3BzL3RodW1ibmFpbC5qcGVn/9j/4AAQSkZJRgABAQAASABIAAD/4QCARXhpZgAATU0AKgAAAAgABAEaAAUAAAABAAAAPgEbAAUAAAABAAAARgEoAAMAAAABAAIAAIdpAAQAAAABAAAATgAAAAAAAABIAAAAAQAAAEgAAAABAAOgAQADAAAAAQABAACgAgAEAAAAAQAAAWmgAwAEAAAAAQAAAgAAAAAA/+0AOFBob3Rvc2hvcCAzLjAAOEJJTQQEAAAAAAAAOEJJTQQlAAAAAAAQ1B2M2Y8AsgTpgAmY7PhCfv/AABEIAgABaQMBEQACEQEDEQH/xAAfAAABBQEBAQEBAQAAAAAAAAAAAQIDBAUGBwgJCgv/xAC
1EAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy8/T19vf4+fr/xAAfAQADAQEBAQEBAQEBAAAAAAAAAQIDBAUGBwgJCgv/xAC1EQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/2wBDAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/3QAEAC7/2gAMAwEAAhEDEQA/AP7Yfgx8GPg9N8HvhRLL8KPhrLLL8NfAskkkngTws8kkj+F9LZ3d200s7uxLMzHczEk5JNAHpX/ClPg3/wBEl+GX/hBeFf8A5W0AH/ClPg3/ANEl+GX/AIQXhX/5W0AH/ClPg3/0SX4Zf+EF4V/+VtAB/wAKU+Df/RJfhl/4QXhX/wCVtAB/wpT4N/8ARJfhl/4QXhX/AOVtAB/wpT4N/wDRJfhl/wCEF4V/+VtAB/wpT4N/9El+GX/hBeFf/lbQAf8AClPg3/0SX4Zf+EF4V/8AlbQAf8KU+Df/AESX4Zf+EF4V/wDlbQAf8KU+Df8A0SX4Zf8AhBeFf/lbQAf8KU+Df/RJfhl/4QXhX/5W0AH/AApT4N/9El+GX/hBeFf/AJW0AH/ClPg3/wBEl+GX/hBeFf8A5W0AH/ClPg3/ANEl+GX/AIQXhX/5W0AH/ClPg3/0SX4Zf+EF4V/+VtAB/wAKU+Df/RJfhl/4QXhX/wCVtAB/wpT4N/8ARJfhl/4QXhX/AOVtAB/wpT4N/wDRJfhl/wCEF4V/+VtAB/wpT4N/9El+GX/hBeFf/lbQAf8AClPg3/0SX4Zf+EF4V/8AlbQAf8KU+Df/AESX4Zf+EF4V/wDlbQAf8KU+Df8A0SX4Zf8AhBeFf/lbQAf8KU+Df/RJfhl/4QXhX/5W0AH/AApT4N/9El+GX/hBeFf/AJW0AH/ClPg3/wBEl+GX/hBeFf8A5W0AH/ClPg3/ANEl+GX/AIQXhX/5W0AH/ClPg3/0SX4Zf+EF4V/+VtAB/wAKU+Df/RJfhl/4QXhX/wCVtAB/wpT4N/8ARJfhl/4QXhX/AOVtAB/wpT4N/wDRJfhl/wCEF4V/+VtAB/wpT4N/9El+GX/hBeFf/lbQAf8AClPg3/0SX4Zf+EF4V/8AlbQAf8KU+Df/AESX4Zf+EF4V/wDlbQAf8KU+Df8A0SX4Zf8AhBeFf/lbQAf8KU+Df/RJfhl/4QXhX/5W0AH/AApT4N/9El+GX/hBeFf/AJW0AH/ClPg3/wBEl+GX/hBeFf8A5W0AH/ClPg3/ANEl+GX/AIQXhX/5W0AH/ClPg3/0SX4Zf+EF4V/+VtAB/wAKU+Df/RJfhl/4QXhX/wCVtAB/wpT4N/8ARJfhl/4QXhX/AOVtAB/wpT4N/wDRJfhl/wCEF4V/+VtAB/wpT4N/9El+GX/hBeFf/lbQAf8AClPg3/0SX4Zf+EF4V/8AlbQB/Nd/wrT4c/8ARP8AwT/4Sug//INAH//Q/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9H+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/T/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9T+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAeDfEX4/+Hvhf8QfD3gbxN4W8Vx6ZrfgHxz8TNQ+JS33w9sPh34O8G/DOXRk8fav4uvfEHj3RPE1jD4Xt/Evhu/v307wrq0M1hrUU+ny3Z03Xk0oAxf8Ahrr9n+W68P2WneN7nXLrxJ4x0D4f2sHh7wj411x9L8aeI9T13RrPwx4qGmeHrr/hD9esNV8Na1YeJNG8Uto+q+Eriz2+KLLRxNbvKAXNd/am+C2h+MbHwIPFDa34kn8V6n4Q1Ox8OWU+sSaBf6R8P/iz8Q7+7v4YALrVNPt7T4K+PPCs58JW/ia/tfH2nf8ACHXun22rW2qRaWAZN7+2L+z3bRaLNY+N5/ECa/4p0XwRYyeG/DPinWLW38XeIPh9r/xR07w7rupW+jHSvC2qQ+B/Dt5r2vweJ77SB4Ns7zRZvGjeHoNc0qa6ANXSP2r/ANn7WUia2+JWhxg6X4r1q7mlNxLpWl6R4Asbe7+IWsaj4lsobvwrD4f+H91dQeHvGvimLXZ/C3hzxhJH4O1LWovE8sWkOANvf2s/2eNN06x1bUviZpmnadqdvey6fdahpXiSyS7v9N8cH4a6l4YgW50aGR/HenePlfwnf/D0L/wnNlrUctnc+HonikKgGaP2wfgC/wARfCHwzh8arNrHje68eaVoWsLYXkXhaTxJ8O/Gfw8+Hmu+GJtcuY4Ihqs/jj4m+HPB+l3MME+g3njFbzwS+tW/jP8As/QL8A+nKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA/mXoA//1f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQB5b8Sfgr8MPi/bahZ/EfwrB4otNV+HPxJ+Emo2t1qGsWltefDz4vW2gWfxF8OTw6ZqNlG0XiO18MaJDJf7Rq2mrZE6Pf6c11etcAHk2k/sUf
s26Fq/hbXtJ8DavYaz4JstA07wrqNt8R/ifHc6LY+G/Gcfj/TbS0ZfGKqLZ/FKTX2p28ivBrFpqGs6LqsV3omu65p2oAEGtfsOfsueILvx9fal8MAbj4neIfEXivxo1j4y8f6Qmp+IvF3hL4ieCPE+rW0Ok+KLKDRbjXfDvxY+IUOprocenQXWpeJbrxBJEfEFtp+p2gBQ0H9gr9lfwv8AYz4f+HWraW2nW/hSx06S2+J3xZ86w0/wXbeLLLQ9MtLiTx01xBpf2Dx54107WdMSQWHiTT/FWvWXiKDVLbVLtJQB17+wd+y1qWhWfhXU/h9rOqeEdPvfH9/p3g7VPij8W9S8H6dc/FLRPEnh/wCIT6f4WvvHU2g2K+LNM8Y+K01aO10+FJbrxFrGoIqX17PcMAV9M/YB/ZM0jxR4H8Z2PwtuF8S/Di38NWvgzUZ/iF8T7saNF4R+Is3xY0DFjc+M5tNv5LP4gTvr8s2p2l5JfYTS79rnRkTT6AO9sf2UfgVp97a30PhPWJ5dP8S3vizR4NR+IPxI1XTvD+rah8V/AHxwuYvDek6n4vvNL8OaGPin8MPBPiu28LaJZ2Hhmxk0iXSbHSbfQNX1rS9SAPoqgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAP5l6AP/W/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9f+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/R/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9L+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/U/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9X+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/X/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9D+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/S/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9P+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/V/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9b+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/Q/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9H+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQA
UAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/T/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9T+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/W/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9f+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/R/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9L+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/U/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9X+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/X/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9D+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/S/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9P+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/V/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9b+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/Q/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9H+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0v7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/T/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9T+6v4Kf8ka+En/AGTLwH/6iulUAem0
AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1f7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/W/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9f+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0P7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/R/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9L+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/0/7q/gp/yRr4Sf8AZMvAf/qK6VQB6bQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfzL0Af/U/ur+Cn/JGvhJ/wBky8B/+orpVAHptABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/MvQB/9X+6v4Kf8ka+En/AGTLwH/6iulUAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8y9AH/1v7Yfgx8Z/g9D8HvhRFL8V/hrFLF8NfAsckcnjvwskkcieF9LV0dG1IMjowKsrDcrAg4INAHpX/C6/g3/wBFa+GX/he+Ff8A5ZUAH/C6/g3/ANFa+GX/AIXvhX/5ZUAH/C6/g3/0Vr4Zf+F74V/+WVAB/wALr+Df/RWvhl/4XvhX/wCWVAB/wuv4N/8ARWvhl/4XvhX/AOWVAB/wuv4N/wDRWvhl/wCF74V/+WVAB/wuv4N/9Fa+GX/he+Ff/llQAf8AC6/g3/0Vr4Zf+F74V/8AllQAf8Lr+Df/AEVr4Zf+F74V/wDllQAf8Lr+Df8A0Vr4Zf8Ahe+Ff/llQAf8Lr+Df/RWvhl/4XvhX/5ZUAH/AAuv4N/9Fa+GX/he+Ff/AJZUAH/C6/g3/wBFa+GX/he+Ff8A5ZUAH/C6/g3/ANFa+GX/AIXvhX/5ZUAH/C6/g3/0Vr4Zf+F74V/+WVAB/wALr+Df/RWvhl/4XvhX/wCWVAB/wuv4N/8ARWvhl/4XvhX/AOWVAB/wuv4N/wDRWvhl/wCF74V/+WVAB/wuv4N/9Fa+GX/he+Ff/llQAf8AC6/g3/0Vr4Zf+F74V/8AllQAf8Lr+Df/AEVr4Zf+F74V/wDllQAf8Lr+Df8A0Vr4Zf8Ahe+Ff/llQAf8Lr+Df/RWvhl/4XvhX/5ZUAH/AAuv4N/9Fa+GX/he+Ff/AJZUAH/C6/g3/wBFa+GX/he+Ff8A5ZUAH/C6/g3/ANFa+GX/AIXvhX/5ZUAH/C6/g3/0Vr4Zf+F74V/+WVAB/wALr+Df/RWvhl/4XvhX/wCWVAB/wuv4N/8ARWvhl/4XvhX/AOWVAB/wuv4N/wDRWvhl/wCF74V/+WVAB/wuv4N/9Fa+GX/he+Ff/llQAf8AC6/g3/0Vr4Zf+F74V/8AllQAf8Lr+Df/AEVr4Zf+F74V/wDllQAf8Lr+Df8A0Vr4Zf8Ahe+Ff/llQAf8Lr+Df/RWvhl/4XvhX/5ZUAH/AAuv4N/9Fa+GX/he+Ff/AJZUAH/C6/g3/wBFa+GX/he+Ff8A5ZUAH/C6/g3/ANFa+GX/AIXvhX/5ZUAH/C6/g3/0Vr4Zf+F74V/+WVAB/wALr+Df/RWvhl/4XvhX/wCWVAB/wuv4N/8ARWvhl/4XvhX/AOWVAB/wuv4N/wDRWvhl/wCF74V/+WVAB/wuv4N/9Fa+GX/he+Ff/llQAf8AC6/g3/0Vr4Zf+F74V/8AllQB/Nd/wsv4c/8ARQPBP/hVaD/8nUAf/9kAAFBLAwQUAAYACAAAACEAuN5y8JsDAACACQAAEQAAAHdvcmQvc2V0dGluZ3MueG1stFZLj9s2EL4X6H8wdK5Wj8iOV403sL1xs8E6WazcS2+URNnE8iEMKatO0f/eESWunGYRuA3ii8n55s1vxn7z9k/BJ0cKmim58KKr0JtQWaiSyf3C+3238efeRBsiS8KVpAvvRLX39ubnn960qabGoJqeoAupU1EsvIMxdRoEujhQQfSVqqlEsFIgiMEr7ANB4Kmp/UKJmhiWM87MKYjDcOYNbtTCa0CmgwtfsAKUVpXpTFJVVaygw5ezgEvi9ia3qmgElcZGDIByzEFJfWC1dt7E//WG4ME5OX6riKPgTq+NwgvKbRWUzxaXpNcZ1KAKqjU+kOAuQSbHwMlXjp5jX2HsoUTrCs2j0J7OM5/+NwfxvxxofkklPXTPciDQ82QoQxTp3V4qIDlHVmI5E8zIu0FaflZKTNq0plDg2yCnw9ALOgA7oqrMEEMR1jXl3JK84JSgwzbdAxFITyexNiWtSMPNjuSZUTUqHQnm/Tqc9/DhVB+otCT6A8fD4Uk87fHiQIAUhkJWkwKjrZU0oLjTK9VHZdY4CoAv1VtocqQPQI+Mtg+sMA3Q3pGdl/GU9bOHjiQR2IAv5mmrStoV1AC7/I06A5tU5HJ/MZDCXQGspLuu8Zk5cbrBmjL2mS5l+aHRhqFH25DvyOBbCWC7MfInpMruVNMNJV2P9A8KZh9ow1m9ZQAK7mSJlPphwVhVUcAADCm6RdYxUK3t83tKSlzR3xk3OKcRLvxSu8OjUsaphuF8Fs5vN32mHXoJslxGr5fJS8jqOpldW0oFz1FF2i3LB3CnjkIT0VusiciBkcm2W6dBp5HD04pJh+cU9wM9R7Imd6Dv94AWhPMNjp4D7AoQacl0fUsre+ZbAvvR76A
BL0pxDXx49tWtFQq/gWrqHm2B1D01nEqUJIMlk+aeCSfXTZ45K4kb7QxqZPnpCLZPY3va1OAT2xG7J5YqVrcCf/M4UIlD1tGAbkld92zK99HC42x/MFFHAIO3En917SXfxwMWWyzuMXshRVcZag+HURY72ZneKyd7NcoSJ0tG2dTJpqNs5mSzToZLlALu4icktjt28kpxrlpavh/xr0RuSxcMXzw7iXxcrr/0GGcaJ63GPWwUOOxXi0VJWqriDsmKp/655+t3yTxa9vDU7m+zQx49YWsfabUimpYD5kynvelfm+4zj1f+MrqN/WQ2XfnzeP3OX23iZbReXs+m6/jvYQ7cX6ebfwAAAP//AwBQSwMEFAAGAAgAAAAhAPC8NQHcAQAA8QUAABIAAAB3b3JkL2ZvbnRUYWJsZS54bWy8k9tq4zAQhu8LfQej+8ay4vRg6pQ0bWBh6cXSfQBFkW2xOhhJiTdvvyPZcQMhbJallUHI/4x+jT40j0+/lUx23DphdImyCUYJ18xshK5L9PN9dXOPEuep3lBpNC/Rnjv0NL++euyKymjvEtivXaFYiRrv2yJNHWu4om5iWq4hWBmrqIdfW6eK2l/b9oYZ1VIv1kIKv08JxrdosLGXuJiqEoy/GLZVXPu4P7VcgqPRrhGtO7h1l7h1xm5aaxh3Du6sZO+nqNCjTZafGCnBrHGm8hO4zFBRtILtGY4rJT8MZv9mQEYDxYpvtTaWriXAh0oSMEPzgX7SFZoqCCypFGsrYqCl2jieQWxHZYkwwSs8gzl8OZ6GGaUhkTXUOh5M+kTcyxVVQu4PKt160+ut8Kw5yDtqRaipDzlRQ2Dr1rhErxgGWa1Qr2QlykFYLEeFhKPiyAZlOio4KCz69BkPcReLPmMOnJn2AE5AvAvFXfLGu+SHUVSfAULwLYCYAY4AZvr5QMji9QjIEpS7+/xw/Q8gD38H0mO8HMgCypJnMDwDhnx4GfF1fD6G43cxYJh+BYahQZLvom782TYJzfFFbbIIFZPjVxHahOC75xMc8fL/2SbDws3/AAAA//8DAFBLAwQUAAYACAAAACEA4IvKVR8BAAARAgAAFAAAAHdvcmQvd2ViU2V0dGluZ3MueG1slNFRS8MwEAfwd8HvUPK+pRs6tKwbgkz2MgbVD5Cl1zWY5EIua7dv71nnRHyZbzku9+P+3Hx5dDbrIJJBX4rJOBcZeI218ftSvL2uRg8io6R8rSx6KMUJSCwXtzfzvuhhV0FK/JMyVjwVTpeiTSkUUpJuwSkaYwDPzQajU4nLuJdOxfdDGGl0QSWzM9akk5zm+UycmXiNgk1jNDyjPjjwaZiXESyL6Kk1gb61/hqtx1iHiBqIOI+zX55Txl+Yyd0fyBkdkbBJYw5z3migeHySDy9nf4D7/wHTC+B0sd57jGpn+QS8ScaYWPANlLXYbzcv8rOocYOpUh08UcUpLKyMhaETzBEsbSGuvW6zvuiULcXjTHBT/jrk4gMAAP//AwBQSwMEFAAGAAgAAAAhABZNBGBtAQAA7wIAABEACAFkb2NQcm9wcy9jb3JlLnhtbCCiBAEooAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJySUW+CMBSF35fsP5C+Q4suxhDAZDM+zcRkLlv21rVX7YS2aavIv18BxbH5tLd7e757uJw2nZ3KIjiCsULJDMURQQFIpriQ2wy9rhfhFAXWUclpoSRkqAaLZvn9Xcp0wpSBlVEajBNgA+8kbcJ0hnbO6QRjy3ZQUht5Qnpxo0xJnW/NFmvK9nQLeETIBJfgKKeO4sYw1L0jOlty1lvqgylaA84wFFCCdBbHUYyvrANT2psDrfKDLIWrNdxEL2JPn6zowaqqomrcon7/GL8vn1/aXw2FbLJigPKUs8QJV0Ce4mvpK3v4/ALmuuO+8TUzQJ0y+ZweBQ9WovBdC12EJvI91JUy3PrxQecxDpYZoZ2/yM58cODpglq39De7EcAf61/f+as3IwaOonkZedwSfZueY+52Ax74eJIuzIvyNn6arxcoH5F4EpJxGJM1mSajh4SQj2a9wfzVsDwv8G/Hi0GX0PCJ5t8AAAD//wMAUEsDBBQABgAIAAAAIQCBlv05MgsAAGRyAAAPAAAAd29yZC9zdHlsZXMueG1svJ3bctu6FYbvO9N34OiqvXB8jJ14trPHduLaUzvbO3Kaa4iEJNQgofLgQ5++IEhJkBdBcQGrvrIlan0A8eMHsEBS+u33l1RGTzwvhMrORvsf9kYRz2KViGx2Nvr5cLXzaRQVJcsSJlXGz0avvBj9/uWvf/nt+bQoXyUvIg3IitM0PhvNy3JxurtbxHOesuKDWvBMH5yqPGWlfpnPdlOWP1aLnVilC1aKiZCifN092Ns7HrWYfAhFTaci5l9VXKU8K038bs6lJqqsmItFsaQ9D6E9qzxZ5CrmRaFPOpUNL2UiW2H2jwAoFXGuCjUtP+iTaWtkUDp8f8/8l8o14CMOcLACpPHpzSxTOZtI3fq6JpGGjb7o5k9U/JVPWSXLon6Z3+fty/aV+XOlsrKInk9ZEQvxoEvWkFRo3vV5VoiRPsJZUZ4XgnUenNf/dB6Ji9J6+0IkYrRbl1j8Vx98YvJsdHC0fOeyrsHGe5Jls+V703zn6oddk7MRz3Z+juu3Jpp7NmL5zvi8DtxtT6z5a53uYvWq+dSbttFdQ3eUcdNf9VE+vVXxI0/GpT5wNtqri9Jv/ry5z4XKdZ88G33+3L455qm4FknCM+uD2Vwk/NecZz8Lnqzf//PK9Kv2jVhVmf7/8NOe0UsWybeXmC/qXqqPZqxuve91gKw/XYl14Sb8P0vYfttmXfFzzmqrRvtvEab6KMRBHVFYZ9vNrN6cu/kUqqDD9yro6L0K+vheBR2/V0En71XQp/cqyGD+nwWJLOEvjRFhMYC6jeNwI5rjMBua4/ASmuOwCprjcAKa4+joaI6jH6M5jm6K4JQqdvVCq7MfOnp7P3f7HOHH3T4l+HG3zwB+3O0Dvh93+/jux90+nPtxt4/eftztgzWe2yy1ohtts6wMdtlUqTJTJY9K/hJOY5lmmfyFhldPejwnOUkCTDOytRNxMC1m5vX2HmJM6j+fl3XKFalpNBWzKtdpb2jFefbEpU5AI5YkmkcIzHlZ5Y4W8enTOZ/ynGcxp+zYdFApMh5lVToh6JsLNiNj8Swhbr4lkWRQWHVoVpXz2iSCoFOnLM5VeNUUIxsfbkUR3l
Y1JLqopORErO80XcywwnMDgwlPDQwmPDMwmPDEwNKMqolaGlFLtTSiBmtpRO3W9E+qdmtpRO3W0ojaraWFt9uDKKUZ4u1Vx/7wvbtLqeod5+B6jMUsY3oBED7dtHum0T3L2Sxni3lU7x93Y+1zxpZzoZLX6IFiTluRqNb1potc6rMWWRXeoBs0KnOteET2WvGIDLbihVvsTi+T6wXaNU0+M64mZadpDWmQacdMVs2CNtxtrAzvYWsDXIm8ILNBN5agB3+vl7O1nBQj37qW4RVbs8Jt9XZUIq1eiySopVTxI80wfP264LlOyx6DSVdKSvXMEzriuMxV09dsyx8YSQZZ/lu6mLNCmFxpAzF8ql9eq47u2CL4hO4lExmNbt92UiZkRLeCuH64u40e1KJOM+uGoQFeqLJUKRmz3Qn82y8++TtNBc91Epy9Ep3tOdH2kIFdCoJJpiGphIikl5kiEyRzqOH9k79OFMsTGtp9zpvbQ0pORByzdNEsOgi8pcfFZz3+EKyGDO9fLBf1vhCVqR5IYNa2YVFN/s3j8KHuu4pIdob+qEqz/2iWuiaaDhe+TNjAhS8RjJp6eqj7L8HJbuDCT3YDR3Wyl5IVhXBeQvXmUZ3ukkd9vuHJX8tTUuXTStI14BJI1oJLIFkTKlmlWUF5xoZHeMKGR32+hF3G8Ai25AzvH7lIyMQwMColDIxKBgOj0sDASAUIv0PHgoXfpmPBwu/VaWBESwALRtXPSKd/oqs8FoyqnxkYVT8zMKp+ZmBU/ezwa8SnU70IpptiLCRVn7OQdBNNVvJ0oXKWvxIhv0k+YwQbpA3tPlfT+rkBlTU3cRMg6z1qSbjYbnBUIv/iE7Kq1SzKehHsiDIplSLaW1tPOCZy8961bWHmmYvgKpjN9lv+xClW4xaM6DJAAwuXzYKFT1MWLHyasmDh05QFC5+mLFj4NGXBwu9fvpcs5nMlE547jNhXkWi8YHF7bQlcox60V38rZvMyGs9Xl6hszPHe1sjlLtNG2PYCuwaK44OesDueiCpdVhQ+AXR8ODzYGHojePmgVk/wevm7EflxYCQs83h75Dq124g8GRgJy/w0MNKMUhuRfYP4V5Y/dnaEk77+s9qYcHS+k75etAruLLavI60iu7rgSV8v2rBKdB7H9SUuqM4wz7jjh5nHHY9xkZuCsZObMthXbkSfwX7wJ1EvRzGDpilvdcvP2+IOzZQ6aOT8s1LNxaaNq6TDn0S80av9rOBRJ+dw+NXWjVHG3Y6Dhxs3YvC440YMHoDciEEjkTMcNSS5KYPHJjdi8CDlRqBHKzgj4EYrGI8brWC8z2gFKT6jVcAqwI0YvBxwI9BGhQi0UQNWCm4Eyqgg3MuokII2KkSgjQoRaKPCBRjOqDAeZ1QY72NUSPExKqSgjQoRaKNCBNqoEIE2KkSgjeq5tneGexkVUtBGhQi0USECbVSzXgwwKozHGRXG+xgVUnyMCiloo0IE2qgQgTYqRKCNChFoo0IEyqgg3MuokII2KkSgjQoRaKM2z8f6GxXG44wK432MCik+RoUUtFEhAm1UiEAbFSLQRoUItFEhAmVUEO5lVEhBGxUi0EaFCLRRzaWDAKPCeJxRYbyPUSHFx6iQgjYqRKCNChFoo0IE2qgQgTYqRKCMCsK9jAopaKNCBNqoENHXP9vr6q5nQ/bxu57Ox0yGX7pqK/XD/v4BG3U4HLWslZs1/AGaC6Ueo86nZQ9NvjEMIiZSKLNF7bgXxOaaC6Soq/V/XPY/lmbTA78prH2Ax1zoB/CjoZFgT+Wor8vbkSDJO+rr6XYkWHUe9Y2+diSYBo/6Bl3jy+WdVHo6AsF9w4wVvO8I7xutrXDYxH1jtBUIW7hvZLYCYQP3jcdW4MeoHpzfRn8c2E7Hq5uiAaGvO1qEEzehr1tCrZbDMTTGUNHchKHquQlDZXQTUHo6MXhh3Si0wm6Un9TQZlip/Y3qJmClhgQvqQHGX2qI8pYaovykhgMjVmpIwErtPzi7CV5SA4y/1BDlLTVE+UkNpzKs1JCAlRoSsFIHTshOjL/UEOUtNUT5SQ0Xd1ipIQErNSRgpYYEL6kBxl9qiPKWGqL8pAZZMlpqSMBKDQlYqSHBS2qA8Zcaorylhqg+qc0uyobUKIWtcNwizArETchWIG5wtgI9siUr2jNbsgie2RLUaqk5LluyRXMThqrnJgyV0U1A6enE4IV1o9AKu1F+UuOypS6p/Y3qJmClxmVLTqlx2VKv1LhsqVdqXLbklhqXLXVJjcuWuqT2H5zdBC+pcdlSr9S4bKlXaly25JYaly11SY3LlrqkxmVLXVIHTshOjL/UuGypV2pctuSWGpctdUmNy5a6pMZlS11S47Ilp9S4bKlXaly21Cs1LltyS43LlrqkxmVLXVLjsqUuqXHZklNqXLbUKzUuW+qV2pEt7T5v/GpYzTa/d6c/XL4ueP3F8dYDM0nzxbntRUDzwZtk9etedXBdk6j9xbP2bVPh9oJhU6IJhEXFc11W3H7ll6OoeyWFPm+WJ/pwCYp0fLOvqcL65JefbhtzfRG0+dzGBc/eGpd1Y/fU1ojBqt72aRRzVfFz2wW31VHXaCKbH8PT/9xkiQY8t7+w1tQ1eWENSh+/5FLesebTauH+qOTTsjm6v2cen31zfNJ8YaEzPjeDhBOwu1mZ5mX7w3eOFm9+wqC9eu1o9fMqrjIutRt4R5ub+ylCm3tdweV/xZf/AQAA//8DAFBLAwQUAAYACAAAACEAQP7QLGkBAAC3AgAAEAAIAWRvY1Byb3BzL2FwcC54bWwgogQBKKAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACcUk1LxTAQvAv+h9K7L32CH8i+iCjiQUV4Vc8h2bbBNAnJKr5/78ZqrXgzp92ZZHZmCZy/j656w5Rt8Jt6vWrqCr0Oxvp+Uz+21wendZVJeaNc8Lipd5jrc7m/Bw8pRExkMVcs4fOmHojimRBZDziqvGLaM9OFNCriNvUidJ3VeBX064iexGHTHAt8J/QGzUGcBetJ8eyN/itqgi7+8lO7i6wnocUxOkUo78tLtzKBRhAzCm0g5Vo7omwYnht4UD1muQYxFfAcksnyEMRUwOWgktLE+5PrExCLFi5idFYr4sXKO6tTyKGj6k5p6ynkoSoKIJa3gENsUb8mS7viY9nCrfWTk6lgZ0n1ScXhy97cwVYrh5ccX3bKZQTxAxSVl/wY23BVYn/xv8FFpmdLwzYqX
QafLtMtCNgyioa9zuNmAG54/ckVeX7rezTfd/4SZV9P0z+U66NVw+dzO98YZ5w/iPwAAAD//wMAUEsBAi0AFAAGAAgAAAAhAG2KJ0tmAQAAVAUAABMAAAAAAAAAAAAAAAAAAAAAAFtDb250ZW50X1R5cGVzXS54bWxQSwECLQAUAAYACAAAACEAx8InvP8AAADfAgAACwAAAAAAAAAAAAAAAACfAwAAX3JlbHMvLnJlbHNQSwECLQAUAAYACAAAACEAE6o+h/YAAAAxAwAAHAAAAAAAAAAAAAAAAADPBgAAd29yZC9fcmVscy9kb2N1bWVudC54bWwucmVsc1BLAQItABQABgAIAAAAIQD1Yo5gZQIAAA4HAAARAAAAAAAAAAAAAAAAAAcJAAB3b3JkL2RvY3VtZW50LnhtbFBLAQItABQABgAIAAAAIQBtTVmrIQYAAI4aAAAVAAAAAAAAAAAAAAAAAJsLAAB3b3JkL3RoZW1lL3RoZW1lMS54bWxQSwECLQAKAAAAAAAAACEAvOgH/fQnAAD0JwAAFwAAAAAAAAAAAAAAAADvEQAAZG9jUHJvcHMvdGh1bWJuYWlsLmpwZWdQSwECLQAUAAYACAAAACEAuN5y8JsDAACACQAAEQAAAAAAAAAAAAAAAAAYOgAAd29yZC9zZXR0aW5ncy54bWxQSwECLQAUAAYACAAAACEA8Lw1AdwBAADxBQAAEgAAAAAAAAAAAAAAAADiPQAAd29yZC9mb250VGFibGUueG1sUEsBAi0AFAAGAAgAAAAhAOCLylUfAQAAEQIAABQAAAAAAAAAAAAAAAAA7j8AAHdvcmQvd2ViU2V0dGluZ3MueG1sUEsBAi0AFAAGAAgAAAAhABZNBGBtAQAA7wIAABEAAAAAAAAAAAAAAAAAP0EAAGRvY1Byb3BzL2NvcmUueG1sUEsBAi0AFAAGAAgAAAAhAIGW/TkyCwAAZHIAAA8AAAAAAAAAAAAAAAAA40MAAHdvcmQvc3R5bGVzLnhtbFBLAQItABQABgAIAAAAIQBA/tAsaQEAALcCAAAQAAAAAAAAAAAAAAAAAEJPAABkb2NQcm9wcy9hcHAueG1sUEsFBgAAAAAMAAwABgMAAOFRAAAAAA==" } + + - do: + search: + index: test + body: + fields: [file.content, file.author, file.date, file.content_length, file.content_type] + - match: { hits.total: 1 } + - match: { hits.hits.0.fields: { + file.content: ["Test elasticsearch\n"], + file.author: ["David Pilato"], + file.date: ["2016-03-10T08:24:00Z"], + file.content_length: ["21757"], + file.content_type: ["application/vnd.openxmlformats-officedocument.wordprocessingml.document"] + } + } + diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index ce78c75d783..802ca1d7653 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -26,7 +26,6 @@ import java.util.Map; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.hash.MurmurHash3; import org.elasticsearch.common.settings.Settings; @@ -72,12 +71,10 @@ public class Murmur3FieldMapper extends LongFieldMapper { @Override protected void setupFieldType(BuilderContext context) { super.setupFieldType(context); - if (context.indexCreatedVersion().onOrAfter(Version.V_2_0_0_beta1)) { - fieldType.setIndexOptions(IndexOptions.NONE); - defaultFieldType.setIndexOptions(IndexOptions.NONE); - fieldType.setHasDocValues(true); - defaultFieldType.setHasDocValues(true); - } + fieldType.setIndexOptions(IndexOptions.NONE); + defaultFieldType.setIndexOptions(IndexOptions.NONE); + fieldType.setHasDocValues(true); + defaultFieldType.setHasDocValues(true); } @Override @@ -97,17 +94,11 @@ public class Murmur3FieldMapper extends LongFieldMapper { Builder builder = new Builder(name); // tweaking these settings is no longer allowed, the entire purpose of murmur3 fields is to store a hash - if (parserContext.indexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { - if (node.get("doc_values") != null) { - throw new MapperParsingException("Setting [doc_values] cannot be modified for field [" + name + "]"); - } - if (node.get("index") != null) { - throw new MapperParsingException("Setting 
[index] cannot be modified for field [" + name + "]"); - } + if (node.get("doc_values") != null) { + throw new MapperParsingException("Setting [doc_values] cannot be modified for field [" + name + "]"); } - - if (parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - builder.indexOptions(IndexOptions.DOCS); + if (node.get("index") != null) { + throw new MapperParsingException("Setting [index] cannot be modified for field [" + name + "]"); } parseNumberField(builder, name, node, parserContext); diff --git a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java index 072c0db3e59..16865eb98b6 100644 --- a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java +++ b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java @@ -22,10 +22,7 @@ package org.elasticsearch.index.mapper.murmur3; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; @@ -33,22 +30,14 @@ import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.indices.mapper.MapperRegistry; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.InternalSettingsPlugin; import org.junit.Before; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; public class Murmur3FieldMapperTests extends ESSingleNodeTestCase { - @Override - protected Collection> getPlugins() { - return pluginList(InternalSettingsPlugin.class); - } - MapperRegistry mapperRegistry; IndexService indexService; DocumentMapperParser parser; @@ -131,38 +120,4 @@ public class Murmur3FieldMapperTests extends ESSingleNodeTestCase { assertTrue(e.getMessage().contains("Setting [index] cannot be modified")); } } - - public void testDocValuesSettingBackcompat() throws Exception { - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); - indexService = createIndex("test_bwc", settings); - parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field") - .field("type", "murmur3") - .field("doc_values", false) - .endObject().endObject().endObject().endObject().string(); - - DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping)); - Murmur3FieldMapper mapper = (Murmur3FieldMapper)docMapper.mappers().getMapper("field"); - assertFalse(mapper.fieldType().hasDocValues()); - } - - public void testIndexSettingBackcompat() throws Exception { - Settings settings = 
Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); - indexService = createIndex("test_bwc", settings); - parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field") - .field("type", "murmur3") - .field("index", "not_analyzed") - .endObject().endObject().endObject().endObject().string(); - - DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping)); - Murmur3FieldMapper mapper = (Murmur3FieldMapper)docMapper.mappers().getMapper("field"); - assertEquals(IndexOptions.DOCS, mapper.fieldType().indexOptions()); - } - - // TODO: add more tests } diff --git a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperUpgradeTests.java b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperUpgradeTests.java index fe12cb042d4..c632e139955 100644 --- a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperUpgradeTests.java +++ b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperUpgradeTests.java @@ -52,6 +52,7 @@ public class Murmur3FieldMapperUpgradeTests extends ESIntegTestCase { public void testUpgradeOldMapping() throws IOException, ExecutionException, InterruptedException { final String indexName = "index-mapper-murmur3-2.0.0"; + final String indexUUID = "1VzJds59TTK7lRu17W0mcg"; InternalTestCluster.Async master = internalCluster().startNodeAsync(); Path unzipDir = createTempDir(); Path unzipDataDir = unzipDir.resolve("data"); @@ -72,6 +73,7 @@ public class Murmur3FieldMapperUpgradeTests extends ESIntegTestCase { assertFalse(Files.exists(dataPath)); Path src = unzipDataDir.resolve(indexName + "/nodes/0/indices"); Files.move(src, dataPath); + Files.move(dataPath.resolve(indexName), dataPath.resolve(indexUUID)); master.get(); // force reloading dangling indices with a cluster state republish diff --git a/plugins/mapper-size/build.gradle b/plugins/mapper-size/build.gradle index 7af65d19ef3..7d5aa1ee276 100644 --- a/plugins/mapper-size/build.gradle +++ b/plugins/mapper-size/build.gradle @@ -22,3 +22,6 @@ esplugin { classname 'org.elasticsearch.plugin.mapper.MapperSizePlugin' } +// TODO: migrate to points +compileJava.options.compilerArgs << "-Xlint:-deprecation" +compileTestJava.options.compilerArgs << "-Xlint:-deprecation" diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java index 984e83a438e..cfc7e29486c 100644 --- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper.size; import org.apache.lucene.document.Field; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,7 +38,6 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue; -import 
static org.elasticsearch.index.mapper.core.TypeParsers.parseStore; public class SizeFieldMapper extends MetadataFieldMapper { @@ -94,9 +92,6 @@ public class SizeFieldMapper extends MetadataFieldMapper { if (fieldName.equals("enabled")) { builder.enabled(lenientNodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED); iterator.remove(); - } else if (fieldName.equals("store") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - builder.store(parseStore(fieldName, fieldNode.toString(), parserContext)); - iterator.remove(); } } return builder; diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeFieldMapperUpgradeTests.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeFieldMapperUpgradeTests.java index a2af6df4e75..761fb5fd144 100644 --- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeFieldMapperUpgradeTests.java +++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeFieldMapperUpgradeTests.java @@ -53,6 +53,7 @@ public class SizeFieldMapperUpgradeTests extends ESIntegTestCase { public void testUpgradeOldMapping() throws IOException, ExecutionException, InterruptedException { final String indexName = "index-mapper-size-2.0.0"; + final String indexUUID = "ENCw7sG0SWuTPcH60bHheg"; InternalTestCluster.Async master = internalCluster().startNodeAsync(); Path unzipDir = createTempDir(); Path unzipDataDir = unzipDir.resolve("data"); @@ -73,6 +74,7 @@ public class SizeFieldMapperUpgradeTests extends ESIntegTestCase { assertFalse(Files.exists(dataPath)); Path src = unzipDataDir.resolve(indexName + "/nodes/0/indices"); Files.move(src, dataPath); + Files.move(dataPath.resolve(indexName), dataPath.resolve(indexUUID)); master.get(); // force reloading dangling indices with a cluster state republish client().admin().cluster().prepareReroute().get(); diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java index d6b64df9e5d..174520cfada 100644 --- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java +++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java @@ -19,30 +19,20 @@ package org.elasticsearch.index.mapper.size; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.indices.IndicesModule; -import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.InternalSettingsPlugin; import org.junit.Before; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -55,15 +45,9 @@ public class SizeMappingTests extends ESSingleNodeTestCase { MapperService mapperService; DocumentMapperParser parser; - @Override - protected Collection> getPlugins() { - return pluginList(InternalSettingsPlugin.class); // uses index.version.created - } - @Before public void before() { indexService = createIndex("test"); - Map metadataMappers = new HashMap<>(); IndicesModule indices = new IndicesModule(); indices.registerMetadataMapper(SizeFieldMapper.NAME, new SizeFieldMapper.TypeParser()); mapperService = new MapperService(indexService.getIndexSettings(), indexService.analysisService(), indexService.similarityService(), indices.getMapperRegistry(), indexService::newQueryShardContext); @@ -87,31 +71,6 @@ public class SizeMappingTests extends ESSingleNodeTestCase { assertThat(doc.rootDoc().getField("_size").tokenStream(docMapper.mappers().indexAnalyzer(), null), notNullValue()); } - public void testSizeEnabledAndStoredBackcompat() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_size").field("enabled", true).field("store", "yes").endObject() - .endObject().endObject().string(); - Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); - - indexService = createIndex("test2", indexSettings); - MapperRegistry mapperRegistry = new MapperRegistry( - Collections.emptyMap(), - Collections.singletonMap(SizeFieldMapper.NAME, new SizeFieldMapper.TypeParser())); - parser = new DocumentMapperParser(indexService.getIndexSettings(), mapperService, - indexService.analysisService(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); - DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping)); - - BytesReference source = XContentFactory.jsonBuilder() - .startObject() - .field("field", "value") - .endObject() - .bytes(); - ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1")); - - assertThat(doc.rootDoc().getField("_size").fieldType().stored(), equalTo(true)); - assertThat(doc.rootDoc().getField("_size").tokenStream(docMapper.mappers().indexAnalyzer(), null), notNullValue()); - } - public void testSizeDisabled() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_size").field("enabled", false).endObject() diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java index 1c3ece1adad..01d66c177a2 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java @@ -24,6 +24,7 @@ import com.microsoft.azure.storage.StorageException; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -31,7 +32,6 @@ import java.io.InputStream; import java.io.OutputStream; import java.net.URISyntaxException; import java.util.Map; -import java.util.function.Function; /** * Azure Storage Service interface @@ -41,13 +41,20 @@ public 
interface AzureStorageService { final class Storage { public static final String PREFIX = "cloud.azure.storage."; - public static final Setting TIMEOUT_SETTING = Setting.timeSetting("cloud.azure.storage.timeout", TimeValue.timeValueSeconds(-1), false, Setting.Scope.CLUSTER); - public static final Setting ACCOUNT_SETTING = Setting.simpleString("repositories.azure.account", false, Setting.Scope.CLUSTER); - public static final Setting CONTAINER_SETTING = Setting.simpleString("repositories.azure.container", false, Setting.Scope.CLUSTER); - public static final Setting BASE_PATH_SETTING = Setting.simpleString("repositories.azure.base_path", false, Setting.Scope.CLUSTER); - public static final Setting LOCATION_MODE_SETTING = Setting.simpleString("repositories.azure.location_mode", false, Setting.Scope.CLUSTER); - public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("repositories.azure.chunk_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); - public static final Setting COMPRESS_SETTING = Setting.boolSetting("repositories.azure.compress", false, false, Setting.Scope.CLUSTER); + public static final Setting TIMEOUT_SETTING = + Setting.timeSetting("cloud.azure.storage.timeout", TimeValue.timeValueMinutes(-1), Property.NodeScope); + public static final Setting ACCOUNT_SETTING = + Setting.simpleString("repositories.azure.account", Property.NodeScope, Property.Filtered); + public static final Setting CONTAINER_SETTING = + Setting.simpleString("repositories.azure.container", Property.NodeScope); + public static final Setting BASE_PATH_SETTING = + Setting.simpleString("repositories.azure.base_path", Property.NodeScope); + public static final Setting LOCATION_MODE_SETTING = + Setting.simpleString("repositories.azure.location_mode", Property.NodeScope); + public static final Setting CHUNK_SIZE_SETTING = + Setting.byteSizeSetting("repositories.azure.chunk_size", new ByteSizeValue(-1), Property.NodeScope); + public static final Setting COMPRESS_SETTING = + Setting.boolSetting("repositories.azure.compress", false, Property.NodeScope); } boolean doesContainerExist(String account, LocationMode mode, String container); @@ -62,13 +69,17 @@ public interface AzureStorageService { void deleteBlob(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException; - InputStream getInputStream(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException; + InputStream getInputStream(String account, LocationMode mode, String container, String blob) + throws URISyntaxException, StorageException; - OutputStream getOutputStream(String account, LocationMode mode, String container, String blob) throws URISyntaxException, StorageException; + OutputStream getOutputStream(String account, LocationMode mode, String container, String blob) + throws URISyntaxException, StorageException; - Map listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) throws URISyntaxException, StorageException; + Map listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) + throws URISyntaxException, StorageException; - void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) throws URISyntaxException, StorageException; + void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) + throws URISyntaxException, StorageException; AzureStorageService 
start(); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java index cdc6d74edb0..497b0e3753a 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java @@ -169,7 +169,7 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent Storage.TIMEOUT_SETTING.get(s).toString(), (s) -> Setting.parseTimeValue(s, TimeValue.timeValueSeconds(-1), TIMEOUT_KEY.toString()), - false, - Setting.Scope.CLUSTER); - private static final Setting ACCOUNT_SETTING = Setting.adfixKeySetting(Storage.PREFIX, ACCOUNT_SUFFIX, "", Function.identity(), false, Setting.Scope.CLUSTER); - private static final Setting KEY_SETTING = Setting.adfixKeySetting(Storage.PREFIX, KEY_SUFFIX, "", Function.identity(), false, Setting.Scope.CLUSTER); - private static final Setting DEFAULT_SETTING = Setting.adfixKeySetting(Storage.PREFIX, DEFAULT_SUFFIX, "false", Boolean::valueOf, false, Setting.Scope.CLUSTER); + Setting.Property.NodeScope); + private static final Setting ACCOUNT_SETTING = + Setting.adfixKeySetting(Storage.PREFIX, ACCOUNT_SUFFIX, "", Function.identity(), Setting.Property.NodeScope); + private static final Setting KEY_SETTING = + Setting.adfixKeySetting(Storage.PREFIX, KEY_SUFFIX, "", Function.identity(), Setting.Property.NodeScope); + private static final Setting DEFAULT_SETTING = + Setting.adfixKeySetting(Storage.PREFIX, DEFAULT_SUFFIX, "false", Boolean::valueOf, Setting.Property.NodeScope); private final String name; @@ -110,7 +112,7 @@ public final class AzureStorageSettings { } private static List createStorageSettings(Settings settings) { - Setting storageGroupSetting = Setting.groupSetting(Storage.PREFIX, false, Setting.Scope.CLUSTER); + Setting storageGroupSetting = Setting.groupSetting(Storage.PREFIX, Setting.Property.NodeScope); // ignore global timeout which has the same prefix but does not belong to any group Settings groups = storageGroupSetting.get(settings.filter((k) -> k.equals(Storage.TIMEOUT_SETTING.getKey()) == false)); List storageSettings = new ArrayList<>(); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java index 616b150f954..3ce043500ae 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java @@ -74,9 +74,9 @@ public class AzureRepositoryPlugin extends Plugin { module.registerSetting(AzureStorageService.Storage.BASE_PATH_SETTING); module.registerSetting(AzureStorageService.Storage.CHUNK_SIZE_SETTING); module.registerSetting(AzureStorageService.Storage.LOCATION_MODE_SETTING); - // Cloud storage API settings needed to be hidden + + // Cloud storage API settings using a pattern needed to be hidden module.registerSettingsFilter(AzureStorageService.Storage.PREFIX + "*.account"); module.registerSettingsFilter(AzureStorageService.Storage.PREFIX + "*.key"); - module.registerSettingsFilter(AzureStorageService.Storage.ACCOUNT_SETTING.getKey()); } } diff --git 
a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index f2773bccbbd..66db57fdd92 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -67,12 +68,14 @@ public class AzureRepository extends BlobStoreRepository { public final static String TYPE = "azure"; public static final class Repository { - public static final Setting ACCOUNT_SETTING = Setting.simpleString("account", false, Setting.Scope.CLUSTER); - public static final Setting CONTAINER_SETTING = new Setting<>("container", "elasticsearch-snapshots", Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path", false, Setting.Scope.CLUSTER); - public static final Setting LOCATION_MODE_SETTING = Setting.simpleString("location_mode", false, Setting.Scope.CLUSTER); - public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, false, Setting.Scope.CLUSTER); + public static final Setting ACCOUNT_SETTING = Setting.simpleString("account", Property.NodeScope); + public static final Setting CONTAINER_SETTING = + new Setting<>("container", "elasticsearch-snapshots", Function.identity(), Property.NodeScope); + public static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path", Property.NodeScope); + public static final Setting LOCATION_MODE_SETTING = Setting.simpleString("location_mode", Property.NodeScope); + public static final Setting CHUNK_SIZE_SETTING = + Setting.byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, Property.NodeScope); + public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope); } private final AzureBlobStore blobStore; diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 915a85ebdc4..8fc9e50d7f3 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -45,6 +45,7 @@ dependencies { compile 'com.google.guava:guava:16.0.1' compile 'com.google.protobuf:protobuf-java:2.5.0' compile 'commons-logging:commons-logging:1.1.3' + compile 'commons-cli:commons-cli:1.2' compile 'commons-collections:commons-collections:3.2.2' compile 'commons-configuration:commons-configuration:1.6' compile 'commons-io:commons-io:2.4' diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 new file mode 100644 index 00000000000..d38d00127e8 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-cli-1.2.jar.sha1 @@ -0,0 +1 @@ +2bf96b7aa8b611c177d329452af1dc933e14501c \ No newline at end of file diff --git a/distribution/licenses/commons-cli-LICENSE.txt 
b/plugins/repository-hdfs/licenses/commons-cli-LICENSE.txt similarity index 100% rename from distribution/licenses/commons-cli-LICENSE.txt rename to plugins/repository-hdfs/licenses/commons-cli-LICENSE.txt diff --git a/distribution/licenses/commons-cli-NOTICE.txt b/plugins/repository-hdfs/licenses/commons-cli-NOTICE.txt similarity index 100% rename from distribution/licenses/commons-cli-NOTICE.txt rename to plugins/repository-hdfs/licenses/commons-cli-NOTICE.txt diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java index 3ccd6d7987f..427c454fa28 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java @@ -23,6 +23,7 @@ import com.amazonaws.Protocol; import com.amazonaws.services.s3.AmazonS3; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import java.util.Locale; import java.util.function.Function; @@ -38,40 +39,44 @@ public interface AwsS3Service extends LifecycleComponent { /** * cloud.aws.access_key: AWS Access key. Shared with discovery-ec2 plugin */ - Setting KEY_SETTING = Setting.simpleString("cloud.aws.access_key", false, Setting.Scope.CLUSTER); + Setting KEY_SETTING = + Setting.simpleString("cloud.aws.access_key", Property.NodeScope, Property.Filtered); /** * cloud.aws.secret_key: AWS Secret key. Shared with discovery-ec2 plugin */ - Setting SECRET_SETTING = Setting.simpleString("cloud.aws.secret_key", false, Setting.Scope.CLUSTER); + Setting SECRET_SETTING = + Setting.simpleString("cloud.aws.secret_key", Property.NodeScope, Property.Filtered); /** * cloud.aws.protocol: Protocol for AWS API: http or https. Defaults to https. Shared with discovery-ec2 plugin */ Setting PROTOCOL_SETTING = new Setting<>("cloud.aws.protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), - false, Setting.Scope.CLUSTER); + Property.NodeScope); /** * cloud.aws.proxy.host: In case of proxy, define its hostname/IP. Shared with discovery-ec2 plugin */ - Setting PROXY_HOST_SETTING = Setting.simpleString("cloud.aws.proxy.host", false, Setting.Scope.CLUSTER); + Setting PROXY_HOST_SETTING = Setting.simpleString("cloud.aws.proxy.host", Property.NodeScope); /** * cloud.aws.proxy.port: In case of proxy, define its port. Defaults to 80. Shared with discovery-ec2 plugin */ - Setting PROXY_PORT_SETTING = Setting.intSetting("cloud.aws.proxy.port", 80, 0, 1<<16, false, Setting.Scope.CLUSTER); + Setting PROXY_PORT_SETTING = Setting.intSetting("cloud.aws.proxy.port", 80, 0, 1<<16, Property.NodeScope); /** * cloud.aws.proxy.username: In case of proxy with auth, define the username. Shared with discovery-ec2 plugin */ - Setting PROXY_USERNAME_SETTING = Setting.simpleString("cloud.aws.proxy.username", false, Setting.Scope.CLUSTER); + Setting PROXY_USERNAME_SETTING = Setting.simpleString("cloud.aws.proxy.username", Property.NodeScope); /** * cloud.aws.proxy.password: In case of proxy with auth, define the password. 
Shared with discovery-ec2 plugin */ - Setting PROXY_PASSWORD_SETTING = Setting.simpleString("cloud.aws.proxy.password", false, Setting.Scope.CLUSTER); + Setting PROXY_PASSWORD_SETTING = + Setting.simpleString("cloud.aws.proxy.password", Property.NodeScope, Property.Filtered); /** * cloud.aws.signer: If you are using an old AWS API version, you can define a Signer. Shared with discovery-ec2 plugin */ - Setting SIGNER_SETTING = Setting.simpleString("cloud.aws.signer", false, Setting.Scope.CLUSTER); + Setting SIGNER_SETTING = Setting.simpleString("cloud.aws.signer", Property.NodeScope); /** * cloud.aws.region: Region. Shared with discovery-ec2 plugin */ - Setting REGION_SETTING = new Setting<>("cloud.aws.region", "", s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + Setting REGION_SETTING = + new Setting<>("cloud.aws.region", "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); /** * Defines specific s3 settings starting with cloud.aws.s3. @@ -82,68 +87,70 @@ public interface AwsS3Service extends LifecycleComponent { * @see AwsS3Service#KEY_SETTING */ Setting KEY_SETTING = - new Setting<>("cloud.aws.s3.access_key", AwsS3Service.KEY_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.access_key", AwsS3Service.KEY_SETTING, Function.identity(), + Property.NodeScope, Property.Filtered); /** * cloud.aws.s3.secret_key: AWS Secret key specific for S3 API calls. Defaults to cloud.aws.secret_key. * @see AwsS3Service#SECRET_SETTING */ Setting SECRET_SETTING = - new Setting<>("cloud.aws.s3.secret_key", AwsS3Service.SECRET_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.secret_key", AwsS3Service.SECRET_SETTING, Function.identity(), + Property.NodeScope, Property.Filtered); /** * cloud.aws.s3.protocol: Protocol for AWS API specific for S3 API calls: http or https. Defaults to cloud.aws.protocol. * @see AwsS3Service#PROTOCOL_SETTING */ Setting PROTOCOL_SETTING = - new Setting<>("cloud.aws.s3.protocol", AwsS3Service.PROTOCOL_SETTING, s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), false, - Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.protocol", AwsS3Service.PROTOCOL_SETTING, s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), + Property.NodeScope); /** * cloud.aws.s3.proxy.host: In case of proxy, define its hostname/IP specific for S3 API calls. Defaults to cloud.aws.proxy.host. * @see AwsS3Service#PROXY_HOST_SETTING */ Setting PROXY_HOST_SETTING = - new Setting<>("cloud.aws.s3.proxy.host", AwsS3Service.PROXY_HOST_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.proxy.host", AwsS3Service.PROXY_HOST_SETTING, Function.identity(), + Property.NodeScope); /** * cloud.aws.s3.proxy.port: In case of proxy, define its port specific for S3 API calls. Defaults to cloud.aws.proxy.port. * @see AwsS3Service#PROXY_PORT_SETTING */ Setting PROXY_PORT_SETTING = new Setting<>("cloud.aws.s3.proxy.port", AwsS3Service.PROXY_PORT_SETTING, - s -> Setting.parseInt(s, 0, 1<<16, "cloud.aws.s3.proxy.port"), false, Setting.Scope.CLUSTER); + s -> Setting.parseInt(s, 0, 1<<16, "cloud.aws.s3.proxy.port"), Property.NodeScope); /** * cloud.aws.s3.proxy.username: In case of proxy with auth, define the username specific for S3 API calls. * Defaults to cloud.aws.proxy.username. 
* @see AwsS3Service#PROXY_USERNAME_SETTING */ Setting PROXY_USERNAME_SETTING = - new Setting<>("cloud.aws.s3.proxy.username", AwsS3Service.PROXY_USERNAME_SETTING, Function.identity(), false, - Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.proxy.username", AwsS3Service.PROXY_USERNAME_SETTING, Function.identity(), + Property.NodeScope); /** * cloud.aws.s3.proxy.password: In case of proxy with auth, define the password specific for S3 API calls. * Defaults to cloud.aws.proxy.password. * @see AwsS3Service#PROXY_PASSWORD_SETTING */ Setting PROXY_PASSWORD_SETTING = - new Setting<>("cloud.aws.s3.proxy.password", AwsS3Service.PROXY_PASSWORD_SETTING, Function.identity(), false, - Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.proxy.password", AwsS3Service.PROXY_PASSWORD_SETTING, Function.identity(), + Property.NodeScope, Property.Filtered); /** * cloud.aws.s3.signer: If you are using an old AWS API version, you can define a Signer. Specific for S3 API calls. * Defaults to cloud.aws.signer. * @see AwsS3Service#SIGNER_SETTING */ Setting SIGNER_SETTING = - new Setting<>("cloud.aws.s3.signer", AwsS3Service.SIGNER_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.signer", AwsS3Service.SIGNER_SETTING, Function.identity(), Property.NodeScope); /** * cloud.aws.s3.region: Region specific for S3 API calls. Defaults to cloud.aws.region. * @see AwsS3Service#REGION_SETTING */ Setting REGION_SETTING = - new Setting<>("cloud.aws.s3.region", AwsS3Service.REGION_SETTING, s -> s.toLowerCase(Locale.ROOT), false, - Setting.Scope.CLUSTER); + new Setting<>("cloud.aws.s3.region", AwsS3Service.REGION_SETTING, s -> s.toLowerCase(Locale.ROOT), + Property.NodeScope); /** * cloud.aws.s3.endpoint: Endpoint. If not set, endpoint will be guessed based on region setting. 
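A recurring pattern in the hunks above is the migration from positional arguments (a boolean "dynamic" flag plus Setting.Scope.CLUSTER) to the Setting.Property varargs enum. A minimal sketch of the two styles, assuming a hypothetical example.proxy.host key; only the constructor shapes are taken from this patch:

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

public final class ExampleSettings {
    // Old style: a boolean meaning "dynamic" followed by a scope enum, which
    // made call sites easy to misread:
    //   Setting.simpleString("example.proxy.host", false, Setting.Scope.CLUSTER);
    // New style: each behavior is a named Property, passed as varargs:
    public static final Setting<String> PROXY_HOST_SETTING =
            Setting.simpleString("example.proxy.host", Property.NodeScope);
}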
*/ - Setting ENDPOINT_SETTING = - Setting.simpleString("cloud.aws.s3.endpoint", false, Setting.Scope.CLUSTER); + Setting ENDPOINT_SETTING = Setting.simpleString("cloud.aws.s3.endpoint", Property.NodeScope); } AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java index c94491696c0..5c02671e5e9 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsSigner.java @@ -64,7 +64,7 @@ public class AwsSigner { try { validateSignerType(signer, endpoint); } catch (IllegalArgumentException e) { - logger.warn(e.getMessage()); + logger.warn("{}", e.getMessage()); } configuration.setSignerOverride(signer); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java index 5d21bb4e2ac..d07d8c174c5 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/plugin/repository/s3/S3RepositoryPlugin.java @@ -144,16 +144,6 @@ public class S3RepositoryPlugin extends Plugin { settingsModule.registerSetting(S3Repository.Repository.STORAGE_CLASS_SETTING); settingsModule.registerSetting(S3Repository.Repository.CANNED_ACL_SETTING); settingsModule.registerSetting(S3Repository.Repository.BASE_PATH_SETTING); - - // Filter global settings - settingsModule.registerSettingsFilterIfMissing(AwsS3Service.KEY_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsS3Service.SECRET_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsS3Service.PROXY_PASSWORD_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsS3Service.CLOUD_S3.KEY_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsS3Service.CLOUD_S3.SECRET_SETTING.getKey()); - settingsModule.registerSettingsFilterIfMissing(AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING.getKey()); - settingsModule.registerSettingsFilter(S3Repository.Repository.KEY_SETTING.getKey()); - settingsModule.registerSettingsFilter(S3Repository.Repository.SECRET_SETTING.getKey()); } /** diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 3edead0765e..fde774a6b92 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.snapshots.IndexShardRepository; @@ -65,70 +66,78 @@ public class S3Repository extends BlobStoreRepository { * repositories.s3.access_key: AWS Access key specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.access_key. 
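The filter registrations deleted from S3RepositoryPlugin above are the flip side of the new Property.Filtered markers: once the setting declaration itself carries Filtered, a separate registerSettingsFilterIfMissing call appears to be unnecessary. A sketch under that assumption, with EXAMPLE_SECRET standing in for a real credential setting:

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.SettingsModule;

public class ExamplePlugin {
    // Hypothetical sensitive setting; Filtered keeps the value out of APIs
    // that echo settings back, such as the nodes info response.
    static final Setting<String> EXAMPLE_SECRET =
            Setting.simpleString("example.secret_key", Property.NodeScope, Property.Filtered);

    public void onModule(SettingsModule settingsModule) {
        // Before this patch: registerSetting(...) plus an explicit
        //   settingsModule.registerSettingsFilterIfMissing(EXAMPLE_SECRET.getKey());
        // After it, registering the setting is enough; the filter follows
        // from Property.Filtered:
        settingsModule.registerSetting(EXAMPLE_SECRET);
    }
}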
* @see CLOUD_S3#KEY_SETTING */ - Setting KEY_SETTING = new Setting<>("repositories.s3.access_key", CLOUD_S3.KEY_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + Setting KEY_SETTING = + new Setting<>("repositories.s3.access_key", CLOUD_S3.KEY_SETTING, Function.identity(), Property.NodeScope); /** * repositories.s3.secret_key: AWS Secret key specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.secret_key. * @see CLOUD_S3#SECRET_SETTING */ - Setting SECRET_SETTING = new Setting<>("repositories.s3.secret_key", CLOUD_S3.SECRET_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); + Setting SECRET_SETTING = + new Setting<>("repositories.s3.secret_key", CLOUD_S3.SECRET_SETTING, Function.identity(), Property.NodeScope); /** * repositories.s3.region: Region specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.region. * @see CLOUD_S3#REGION_SETTING */ - Setting REGION_SETTING = new Setting<>("repositories.s3.region", CLOUD_S3.REGION_SETTING, s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + Setting REGION_SETTING = + new Setting<>("repositories.s3.region", CLOUD_S3.REGION_SETTING, s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); /** * repositories.s3.endpoint: Endpoint specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.endpoint. * @see CLOUD_S3#ENDPOINT_SETTING */ - Setting ENDPOINT_SETTING = new Setting<>("repositories.s3.endpoint", CLOUD_S3.ENDPOINT_SETTING, s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + Setting ENDPOINT_SETTING = + new Setting<>("repositories.s3.endpoint", CLOUD_S3.ENDPOINT_SETTING, s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); /** * repositories.s3.protocol: Protocol specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.protocol. * @see CLOUD_S3#PROTOCOL_SETTING */ - Setting PROTOCOL_SETTING = new Setting<>("repositories.s3.protocol", CLOUD_S3.PROTOCOL_SETTING, s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), false, Setting.Scope.CLUSTER); + Setting PROTOCOL_SETTING = + new Setting<>("repositories.s3.protocol", CLOUD_S3.PROTOCOL_SETTING, s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope); /** * repositories.s3.bucket: The name of the bucket to be used for snapshots. */ - Setting BUCKET_SETTING = Setting.simpleString("repositories.s3.bucket", false, Setting.Scope.CLUSTER); + Setting BUCKET_SETTING = Setting.simpleString("repositories.s3.bucket", Property.NodeScope); /** * repositories.s3.server_side_encryption: When set to true files are encrypted on server side using AES256 algorithm. * Defaults to false. */ - Setting SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("repositories.s3.server_side_encryption", false, false, Setting.Scope.CLUSTER); + Setting SERVER_SIDE_ENCRYPTION_SETTING = + Setting.boolSetting("repositories.s3.server_side_encryption", false, Property.NodeScope); /** * repositories.s3.buffer_size: Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold, * the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and * to upload each part in its own request. Note that setting a buffer size lower than 5mb is not allowed since it will prevent the * use of the Multipart API and may result in upload errors. Defaults to 5mb.
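These repositories.s3.* declarations use the fallback-setting constructor: if the key is absent, the value is resolved from the fallback setting, so repositories.s3.access_key falls back to cloud.aws.s3.access_key, which in turn falls back to cloud.aws.access_key. A sketch of the same three-level chain with invented example.* keys; the constructor shape is taken directly from the hunks above:

import java.util.function.Function;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

public final class FallbackChainExample {
    // Global default.
    static final Setting<String> GLOBAL_KEY =
            Setting.simpleString("example.access_key", Property.NodeScope);
    // Service-specific key; resolves to GLOBAL_KEY when unset.
    static final Setting<String> SERVICE_KEY =
            new Setting<>("example.s3.access_key", GLOBAL_KEY, Function.identity(), Property.NodeScope);
    // Repository-level key; resolves to SERVICE_KEY, completing the chain.
    static final Setting<String> REPOSITORY_KEY =
            new Setting<>("example.repositories.access_key", SERVICE_KEY, Function.identity(), Property.NodeScope);
}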
*/ - Setting BUFFER_SIZE_SETTING = Setting.byteSizeSetting("repositories.s3.buffer_size", S3BlobStore.MIN_BUFFER_SIZE, false, Setting.Scope.CLUSTER); + Setting BUFFER_SIZE_SETTING = + Setting.byteSizeSetting("repositories.s3.buffer_size", S3BlobStore.MIN_BUFFER_SIZE, Property.NodeScope); /** * repositories.s3.max_retries: Number of retries in case of S3 errors. Defaults to 3. */ - Setting MAX_RETRIES_SETTING = Setting.intSetting("repositories.s3.max_retries", 3, false, Setting.Scope.CLUSTER); + Setting MAX_RETRIES_SETTING = Setting.intSetting("repositories.s3.max_retries", 3, Property.NodeScope); /** * repositories.s3.chunk_size: Big files can be broken down into chunks during snapshotting if needed. Defaults to 100m. */ - Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("repositories.s3.chunk_size", new ByteSizeValue(100, ByteSizeUnit.MB), false, Setting.Scope.CLUSTER); + Setting CHUNK_SIZE_SETTING = + Setting.byteSizeSetting("repositories.s3.chunk_size", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope); /** * repositories.s3.compress: When set to true metadata files are stored in compressed format. This setting doesn’t affect index * files that are already compressed by default. Defaults to false. */ - Setting COMPRESS_SETTING = Setting.boolSetting("repositories.s3.compress", false, false, Setting.Scope.CLUSTER); + Setting COMPRESS_SETTING = Setting.boolSetting("repositories.s3.compress", false, Property.NodeScope); /** * repositories.s3.storage_class: Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy, * standard_ia. Defaults to standard. */ - Setting STORAGE_CLASS_SETTING = Setting.simpleString("repositories.s3.storage_class", false, Setting.Scope.CLUSTER); + Setting STORAGE_CLASS_SETTING = Setting.simpleString("repositories.s3.storage_class", Property.NodeScope); /** * repositories.s3.canned_acl: The S3 repository supports all S3 canned ACLs : private, public-read, public-read-write, * authenticated-read, log-delivery-write, bucket-owner-read, bucket-owner-full-control. Defaults to private. */ - Setting CANNED_ACL_SETTING = Setting.simpleString("repositories.s3.canned_acl", false, Setting.Scope.CLUSTER); + Setting CANNED_ACL_SETTING = Setting.simpleString("repositories.s3.canned_acl", Property.NodeScope); /** * repositories.s3.base_path: Specifies the path within bucket to repository data. Defaults to root directory. 
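To make the buffer_size semantics above concrete: a chunk no larger than buffer_size is uploaded in a single request, while anything bigger is split into parts of buffer_size length via the Multipart Upload API. An illustrative helper, not part of the patch:

// Illustrative only; mirrors the behavior described in the buffer_size javadoc.
static int numberOfUploadRequests(long chunkBytes, long bufferBytes) {
    if (chunkBytes <= bufferBytes) {
        return 1; // single upload request
    }
    // Multipart upload: ceil(chunk / buffer) parts, the last may be shorter.
    return Math.toIntExact((chunkBytes + bufferBytes - 1) / bufferBytes);
}
// e.g. the default 100mb chunk with the default 5mb buffer yields 20 parts.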
*/ - Setting BASE_PATH_SETTING = Setting.simpleString("repositories.s3.base_path", false, Setting.Scope.CLUSTER); + Setting BASE_PATH_SETTING = Setting.simpleString("repositories.s3.base_path", Property.NodeScope); } /** @@ -140,72 +149,75 @@ public class S3Repository extends BlobStoreRepository { * access_key * @see Repositories#KEY_SETTING */ - Setting KEY_SETTING = Setting.simpleString("access_key", false, Setting.Scope.CLUSTER); + Setting KEY_SETTING = Setting.simpleString("access_key", Property.NodeScope, Property.Filtered); /** * secret_key * @see Repositories#SECRET_SETTING */ - Setting SECRET_SETTING = Setting.simpleString("secret_key", false, Setting.Scope.CLUSTER); + Setting SECRET_SETTING = Setting.simpleString("secret_key", Property.NodeScope, Property.Filtered); /** * bucket * @see Repositories#BUCKET_SETTING */ - Setting BUCKET_SETTING = Setting.simpleString("bucket", false, Setting.Scope.CLUSTER); + Setting BUCKET_SETTING = Setting.simpleString("bucket", Property.NodeScope); /** * endpoint * @see Repositories#ENDPOINT_SETTING */ - Setting ENDPOINT_SETTING = Setting.simpleString("endpoint", false, Setting.Scope.CLUSTER); + Setting ENDPOINT_SETTING = Setting.simpleString("endpoint", Property.NodeScope); /** * protocol * @see Repositories#PROTOCOL_SETTING */ - Setting PROTOCOL_SETTING = new Setting<>("protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), false, Setting.Scope.CLUSTER); + Setting PROTOCOL_SETTING = + new Setting<>("protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope); /** * region * @see Repositories#REGION_SETTING */ - Setting REGION_SETTING = new Setting<>("region", "", s -> s.toLowerCase(Locale.ROOT), false, Setting.Scope.CLUSTER); + Setting REGION_SETTING = new Setting<>("region", "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); /** * server_side_encryption * @see Repositories#SERVER_SIDE_ENCRYPTION_SETTING */ - Setting SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("server_side_encryption", false, false, Setting.Scope.CLUSTER); + Setting SERVER_SIDE_ENCRYPTION_SETTING = + Setting.boolSetting("server_side_encryption", false, Property.NodeScope); /** * buffer_size * @see Repositories#BUFFER_SIZE_SETTING */ - Setting BUFFER_SIZE_SETTING = Setting.byteSizeSetting("buffer_size", S3BlobStore.MIN_BUFFER_SIZE, false, Setting.Scope.CLUSTER); + Setting BUFFER_SIZE_SETTING = + Setting.byteSizeSetting("buffer_size", S3BlobStore.MIN_BUFFER_SIZE, Property.NodeScope); /** * max_retries * @see Repositories#MAX_RETRIES_SETTING */ - Setting MAX_RETRIES_SETTING = Setting.intSetting("max_retries", 3, false, Setting.Scope.CLUSTER); + Setting MAX_RETRIES_SETTING = Setting.intSetting("max_retries", 3, Property.NodeScope); /** * chunk_size * @see Repositories#CHUNK_SIZE_SETTING */ - Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", "-1", false, Setting.Scope.CLUSTER); + Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", "-1", Property.NodeScope); /** * compress * @see Repositories#COMPRESS_SETTING */ - Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, false, Setting.Scope.CLUSTER); + Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope); /** * storage_class * @see Repositories#STORAGE_CLASS_SETTING */ - Setting STORAGE_CLASS_SETTING = Setting.simpleString("storage_class", false, Setting.Scope.CLUSTER); + Setting STORAGE_CLASS_SETTING = Setting.simpleString("storage_class", Property.NodeScope); /** * canned_acl * @see 
Repositories#CANNED_ACL_SETTING */ - Setting CANNED_ACL_SETTING = Setting.simpleString("canned_acl", false, Setting.Scope.CLUSTER); + Setting CANNED_ACL_SETTING = Setting.simpleString("canned_acl", Property.NodeScope); /** * base_path * @see Repositories#BASE_PATH_SETTING */ - Setting BASE_PATH_SETTING = Setting.simpleString("base_path", false, Setting.Scope.CLUSTER); + Setting BASE_PATH_SETTING = Setting.simpleString("base_path", Property.NodeScope); } private final S3BlobStore blobStore; diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/SmbDirectoryWrapper.java b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/SmbDirectoryWrapper.java index 88b9d187dcf..fe2c32723e2 100644 --- a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/SmbDirectoryWrapper.java +++ b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/SmbDirectoryWrapper.java @@ -60,7 +60,7 @@ public final class SmbDirectoryWrapper extends FilterDirectory { static final int CHUNK_SIZE = 8192; public SmbFSIndexOutput(String name) throws IOException { - super("SmbFSIndexOutput(path=\"" + fsDirectory.getDirectory().resolve(name) + "\")", new FilterOutputStream(Channels.newOutputStream(Files.newByteChannel(fsDirectory.getDirectory().resolve(name), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE))) { + super("SmbFSIndexOutput(path=\"" + fsDirectory.getDirectory().resolve(name) + "\")", name, new FilterOutputStream(Channels.newOutputStream(Files.newByteChannel(fsDirectory.getDirectory().resolve(name), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.READ, StandardOpenOption.WRITE))) { // This implementation ensures that we never write more than CHUNK_SIZE bytes: @Override public void write(byte[] b, int offset, int length) throws IOException { diff --git a/qa/backwards-5.0/build.gradle b/qa/backwards-5.0/build.gradle new file mode 100644 index 00000000000..93d361c989c --- /dev/null +++ b/qa/backwards-5.0/build.gradle @@ -0,0 +1,23 @@ +apply plugin: 'elasticsearch.rest-test' + +/* This project runs the core REST tests against a two-node cluster where one of the nodes has a different minor version. + * Since we don't have a version to test against, we currently use the hardcoded snapshot to basically run + * against ourselves. To test that using a different version works, go into distribution/zip and execute: + * gradle clean install -Dbuild.snapshot=false + * + * This installs the release build into the local .m2 repository; then change the version here to: + * bwcVersion = "5.0.0" + * + * Now you can run the bwc tests with: + * gradle check -Drepos.mavenlocal=true + * + * (-Drepos.mavenlocal=true will force gradle to look for the zip distribution in the local .m2 repository) + */ +integTest { + includePackaged = true + cluster { + numNodes = 2 + numBwcNodes = 1 + bwcVersion = "5.0.0-SNAPSHOT" // this is the same as the current version until we release the first RC + } +} diff --git a/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/MultiNodeBackwardsIT.java b/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/MultiNodeBackwardsIT.java new file mode 100644 index 00000000000..1f3ad15d1bf --- /dev/null +++ b/qa/backwards-5.0/src/test/java/org/elasticsearch/backwards/MultiNodeBackwardsIT.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
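The SmbDirectoryWrapper hunk above threads the file name through to the superclass constructor and wraps the channel in an anonymous FilterOutputStream that caps each write at CHUNK_SIZE. A standalone sketch of that chunking idea, with names simplified and only CHUNK_SIZE taken from the patch:

import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;

// Splits arbitrarily large writes into slices of at most CHUNK_SIZE bytes,
// presumably to keep individual writes small on SMB mounts.
final class ChunkedOutputStream extends FilterOutputStream {
    static final int CHUNK_SIZE = 8192;

    ChunkedOutputStream(OutputStream out) {
        super(out);
    }

    @Override
    public void write(byte[] b, int offset, int length) throws IOException {
        while (length > 0) {
            int toWrite = Math.min(length, CHUNK_SIZE);
            out.write(b, offset, toWrite);
            offset += toWrite;
            length -= toWrite;
        }
    }
}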
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.backwards; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; +import org.apache.lucene.util.TimeUnits; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.RestTestCandidate; +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +@TimeoutSuite(millis = 40 * TimeUnits.MINUTE) // some of the windows test VMs are slow as hell +public class MultiNodeBackwardsIT extends ESRestTestCase { + + public MultiNodeBackwardsIT(RestTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(0, 1); + } +} + diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/BootstrapCliParserTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/BootstrapCliParserTests.java deleted file mode 100644 index 012af99cef0..00000000000 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/BootstrapCliParserTests.java +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.bootstrap; - -import org.elasticsearch.Build; -import org.elasticsearch.Version; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.cli.CliTool.ExitStatus; -import org.elasticsearch.common.cli.CliToolTestCase; -import org.elasticsearch.common.cli.UserError; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.monitor.jvm.JvmInfo; -import org.hamcrest.Matcher; -import org.junit.After; -import org.junit.Before; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Properties; - -import static org.elasticsearch.common.cli.CliTool.ExitStatus.OK; -import static org.elasticsearch.common.cli.CliTool.ExitStatus.OK_AND_EXIT; -import static org.elasticsearch.common.cli.CliTool.ExitStatus.USAGE; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; - -@SuppressForbidden(reason = "modifies system properties intentionally") -public class BootstrapCliParserTests extends CliToolTestCase { - - private CaptureOutputTerminal terminal = new CaptureOutputTerminal(); - private List propertiesToClear = new ArrayList<>(); - private Map properties; - - @Before - public void before() { - this.properties = new HashMap<>(System.getProperties()); - } - - @After - public void clearProperties() { - for (String property : propertiesToClear) { - System.clearProperty(property); - } - propertiesToClear.clear(); - assertEquals("properties leaked", properties, new HashMap<>(System.getProperties())); - } - - public void testThatVersionIsReturned() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - ExitStatus status = parser.execute(args("version")); - assertStatus(status, OK_AND_EXIT); - - assertThatTerminalOutput(containsString(Version.CURRENT.toString())); - assertThatTerminalOutput(containsString(Build.CURRENT.shortHash())); - assertThatTerminalOutput(containsString(Build.CURRENT.date())); - assertThatTerminalOutput(containsString(JvmInfo.jvmInfo().version())); - } - - public void testThatVersionIsReturnedAsStartParameter() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - ExitStatus status = parser.execute(args("start -V")); - assertStatus(status, OK_AND_EXIT); - - assertThatTerminalOutput(containsString(Version.CURRENT.toString())); - assertThatTerminalOutput(containsString(Build.CURRENT.shortHash())); - assertThatTerminalOutput(containsString(Build.CURRENT.date())); - assertThatTerminalOutput(containsString(JvmInfo.jvmInfo().version())); - - CaptureOutputTerminal terminal = new CaptureOutputTerminal(); - parser = new BootstrapCLIParser(terminal); - status = parser.execute(args("start --version")); - assertStatus(status, OK_AND_EXIT); - - assertThatTerminalOutput(containsString(Version.CURRENT.toString())); - assertThatTerminalOutput(containsString(Build.CURRENT.shortHash())); - assertThatTerminalOutput(containsString(Build.CURRENT.date())); - assertThatTerminalOutput(containsString(JvmInfo.jvmInfo().version())); - } - - public void testThatPidFileCanBeConfigured() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - registerProperties("es.pidfile"); - - ExitStatus status = parser.execute(args("start --pidfile")); // missing pid file - assertStatus(status, USAGE); - - // good cases - 
status = parser.execute(args("start --pidfile /tmp/pid")); - assertStatus(status, OK); - assertSystemProperty("es.pidfile", "/tmp/pid"); - - System.clearProperty("es.pidfile"); - status = parser.execute(args("start -p /tmp/pid")); - assertStatus(status, OK); - assertSystemProperty("es.pidfile", "/tmp/pid"); - } - - public void testThatParsingDaemonizeWorks() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - registerProperties("es.foreground"); - - ExitStatus status = parser.execute(args("start -d")); - assertStatus(status, OK); - assertThat(System.getProperty("es.foreground"), is("false")); - } - - public void testThatNotDaemonizingDoesNotConfigureProperties() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - registerProperties("es.foreground"); - - ExitStatus status = parser.execute(args("start")); - assertStatus(status, OK); - assertThat(System.getProperty("es.foreground"), is(nullValue())); - } - - public void testThatJavaPropertyStyleArgumentsCanBeParsed() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - registerProperties("es.foo", "es.spam"); - - ExitStatus status = parser.execute(args("start -Dfoo=bar -Dspam=eggs")); - assertStatus(status, OK); - assertSystemProperty("es.foo", "bar"); - assertSystemProperty("es.spam", "eggs"); - } - - public void testThatJavaPropertyStyleArgumentsWithEsPrefixAreNotPrefixedTwice() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - registerProperties("es.spam", "es.pidfile"); - - ExitStatus status = parser.execute(args("start -Des.pidfile=/path/to/foo/elasticsearch/distribution/zip/target/integ-tests/es.pid -Dspam=eggs")); - assertStatus(status, OK); - assertThat(System.getProperty("es.es.pidfile"), is(nullValue())); - assertSystemProperty("es.pidfile", "/path/to/foo/elasticsearch/distribution/zip/target/integ-tests/es.pid"); - assertSystemProperty("es.spam", "eggs"); - } - - public void testThatUnknownLongOptionsCanBeParsed() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - registerProperties("es.network.host", "es.my.option"); - - ExitStatus status = parser.execute(args("start --network.host 127.0.0.1 --my.option=true")); - assertStatus(status, OK); - assertSystemProperty("es.network.host", "127.0.0.1"); - assertSystemProperty("es.my.option", "true"); - } - - public void testThatUnknownLongOptionsNeedAValue() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - registerProperties("es.network.host"); - - ExitStatus status = parser.execute(args("start --network.host")); - assertStatus(status, USAGE); - assertThatTerminalOutput(containsString("Parameter [network.host] needs value")); - - status = parser.execute(args("start --network.host --foo")); - assertStatus(status, USAGE); - assertThatTerminalOutput(containsString("Parameter [network.host] needs value")); - } - - public void testParsingErrors() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - - // unknown params - ExitStatus status = parser.execute(args("version --unknown-param /tmp/pid")); - assertStatus(status, USAGE); - assertThatTerminalOutput(containsString("Unrecognized option: --unknown-param")); - - // single dash in extra params - terminal = new CaptureOutputTerminal(); - parser = new BootstrapCLIParser(terminal); - status = parser.execute(args("start -network.host 127.0.0.1")); - assertStatus(status, USAGE); - 
assertThatTerminalOutput(containsString("Parameter [-network.host]does not start with --")); - - // never ended parameter - terminal = new CaptureOutputTerminal(); - parser = new BootstrapCLIParser(terminal); - status = parser.execute(args("start --network.host")); - assertStatus(status, USAGE); - assertThatTerminalOutput(containsString("Parameter [network.host] needs value")); - - // free floating value - terminal = new CaptureOutputTerminal(); - parser = new BootstrapCLIParser(terminal); - status = parser.execute(args("start 127.0.0.1")); - assertStatus(status, USAGE); - assertThatTerminalOutput(containsString("Parameter [127.0.0.1]does not start with --")); - } - - public void testHelpWorks() throws Exception { - List> tuples = new ArrayList<>(); - tuples.add(new Tuple<>("version --help", "elasticsearch-version.help")); - tuples.add(new Tuple<>("version -h", "elasticsearch-version.help")); - tuples.add(new Tuple<>("start --help", "elasticsearch-start.help")); - tuples.add(new Tuple<>("start -h", "elasticsearch-start.help")); - tuples.add(new Tuple<>("--help", "elasticsearch.help")); - tuples.add(new Tuple<>("-h", "elasticsearch.help")); - - for (Tuple tuple : tuples) { - terminal = new CaptureOutputTerminal(); - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - ExitStatus status = parser.execute(args(tuple.v1())); - assertStatus(status, OK_AND_EXIT); - assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/bootstrap/" + tuple.v2()); - } - } - - public void testThatSpacesInParametersAreSupported() throws Exception { - // emulates: bin/elasticsearch --node.name "'my node with spaces'" --pidfile "'/tmp/my pid.pid'" - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - registerProperties("es.pidfile", "es.my.param"); - - ExitStatus status = parser.execute("start", "--pidfile", "foo with space", "--my.param", "my awesome neighbour"); - assertStatus(status, OK); - assertSystemProperty("es.pidfile", "foo with space"); - assertSystemProperty("es.my.param", "my awesome neighbour"); - - } - - public void testThatHelpfulErrorMessageIsGivenWhenParametersAreOutOfOrder() throws Exception { - BootstrapCLIParser parser = new BootstrapCLIParser(terminal); - UserError e = expectThrows(UserError.class, () -> { - parser.parse("start", new String[]{"--foo=bar", "-Dbaz=qux"}); - }); - assertThat(e.getMessage(), containsString("must be before any parameters starting with --")); - assertNull(System.getProperty("es.foo")); - } - - private void registerProperties(String ... 
systemProperties) { - propertiesToClear.addAll(Arrays.asList(systemProperties)); - } - - private void assertSystemProperty(String name, String expectedValue) { - String msg = String.format(Locale.ROOT, "Expected property %s to be %s, terminal output was %s", name, expectedValue, terminal.getTerminalOutput()); - assertThat(msg, System.getProperty(name), is(expectedValue)); - } - - private void assertStatus(ExitStatus status, ExitStatus expectedStatus) { - assertThat(String.format(Locale.ROOT, "Expected status to be [%s], but was [%s], terminal output was %s", expectedStatus, status, terminal.getTerminalOutput()), status, is(expectedStatus)); - } - - private void assertThatTerminalOutput(Matcher matcher) { - assertThat(terminal.getTerminalOutput(), hasItem(matcher)); - } -} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java deleted file mode 100644 index 45f3df22cd7..00000000000 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java +++ /dev/null @@ -1,329 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.cli; - -import com.google.common.jimfs.Configuration; -import com.google.common.jimfs.Jimfs; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.env.Environment; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.FileSystem; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.attribute.GroupPrincipal; -import java.nio.file.attribute.PosixFileAttributeView; -import java.nio.file.attribute.PosixFileAttributes; -import java.nio.file.attribute.PosixFilePermission; -import java.nio.file.attribute.UserPrincipal; -import java.util.Set; - -import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; - -/** - * - */ -public class CheckFileCommandTests extends ESTestCase { - - private CliToolTestCase.CaptureOutputTerminal captureOutputTerminal = new CliToolTestCase.CaptureOutputTerminal(); - - private Configuration jimFsConfiguration = Configuration.unix().toBuilder().setAttributeViews("basic", "owner", "posix", "unix").build(); - private Configuration jimFsConfigurationWithoutPermissions = randomBoolean() ? 
Configuration.unix().toBuilder().setAttributeViews("basic").build() : Configuration.windows(); - - private enum Mode { - CHANGE, KEEP, DISABLED - } - - public void testThatCommandLogsErrorMessageOnFail() throws Exception { - executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.CHANGE)); - assertThat(captureOutputTerminal.getTerminalOutput(), hasItem(containsString("Please ensure that the user account running Elasticsearch has read access to this file"))); - } - - public void testThatCommandLogsNothingWhenPermissionRemains() throws Exception { - executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.KEEP)); - assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); - } - - public void testThatCommandLogsNothingWhenDisabled() throws Exception { - executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); - assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); - } - - public void testThatCommandLogsNothingIfFilesystemDoesNotSupportPermissions() throws Exception { - executeCommand(jimFsConfigurationWithoutPermissions, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); - assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); - } - - public void testThatCommandLogsOwnerChange() throws Exception { - executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.CHANGE)); - assertThat(captureOutputTerminal.getTerminalOutput(), hasItem(allOf(containsString("Owner of file ["), containsString("] used to be ["), containsString("], but now is [")))); - } - - public void testThatCommandLogsNothingIfOwnerRemainsSame() throws Exception { - executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.KEEP)); - assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); - } - - public void testThatCommandLogsNothingIfOwnerIsDisabled() throws Exception { - executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); - assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); - } - - public void testThatCommandLogsNothingIfFileSystemDoesNotSupportOwners() throws Exception { - executeCommand(jimFsConfigurationWithoutPermissions, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); - assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); - } - - public void testThatCommandLogsIfGroupChanges() throws Exception { - executeCommand(jimFsConfiguration, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.CHANGE)); - assertThat(captureOutputTerminal.getTerminalOutput(), hasItem(allOf(containsString("Group of file ["), containsString("] used to be ["), containsString("], but now is [")))); - } - - public void testThatCommandLogsNothingIfGroupRemainsSame() throws Exception { - executeCommand(jimFsConfiguration, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.KEEP)); - assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); - } - - public void testThatCommandLogsNothingIfGroupIsDisabled() throws Exception { - executeCommand(jimFsConfiguration, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); - assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); - } - - public void 
testThatCommandLogsNothingIfFileSystemDoesNotSupportGroups() throws Exception { - executeCommand(jimFsConfigurationWithoutPermissions, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); - assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); - } - - public void testThatCommandDoesNotLogAnythingOnFileCreation() throws Exception { - Configuration configuration = randomBoolean() ? jimFsConfiguration : jimFsConfigurationWithoutPermissions; - - try (FileSystem fs = Jimfs.newFileSystem(configuration)) { - Path path = fs.getPath(randomAsciiOfLength(10)); - Settings settings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(); - new CreateFileCommand(captureOutputTerminal, path).execute(settings, new Environment(settings)); - assertThat(Files.exists(path), is(true)); - } - - assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); - } - - public void testThatCommandWorksIfFileIsDeletedByCommand() throws Exception { - Configuration configuration = randomBoolean() ? jimFsConfiguration : jimFsConfigurationWithoutPermissions; - - try (FileSystem fs = Jimfs.newFileSystem(configuration)) { - Path path = fs.getPath(randomAsciiOfLength(10)); - Files.write(path, "anything".getBytes(StandardCharsets.UTF_8)); - - Settings settings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(); - new DeleteFileCommand(captureOutputTerminal, path).execute(settings, new Environment(settings)); - assertThat(Files.exists(path), is(false)); - } - - assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); - } - - private void executeCommand(Configuration configuration, AbstractTestCheckFileCommand command) throws Exception { - try (FileSystem fs = Jimfs.newFileSystem(configuration)) { - command.execute(fs); - } - } - - abstract class AbstractTestCheckFileCommand extends CheckFileCommand { - - protected final Mode mode; - protected FileSystem fs; - protected Path[] paths; - final Path baseDir; - - public AbstractTestCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException { - super(terminal); - this.mode = mode; - this.baseDir = baseDir; - } - - public CliTool.ExitStatus execute(FileSystem fs) throws Exception { - this.fs = fs; - this.paths = new Path[] { writePath(fs, "p1", "anything"), writePath(fs, "p2", "anything"), writePath(fs, "p3", "anything") }; - Settings settings = Settings.settingsBuilder() - .put(Environment.PATH_HOME_SETTING.getKey(), baseDir.toString()) - .build(); - return super.execute(Settings.EMPTY, new Environment(settings)); - } - - private Path writePath(FileSystem fs, String name, String content) throws IOException { - Path path = fs.getPath(name); - Files.write(path, content.getBytes(StandardCharsets.UTF_8)); - return path; - } - - @Override - protected Path[] pathsForPermissionsCheck(Settings settings, Environment env) { - return paths; - } - } - - /** - * command that changes permissions from a file if enabled - */ - class PermissionCheckFileCommand extends AbstractTestCheckFileCommand { - - public PermissionCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException { - super(baseDir, terminal, mode); - } - - @Override - public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception { - int randomInt = randomInt(paths.length - 1); - Path randomPath = paths[randomInt]; - switch (mode) { - case CHANGE: - Files.write(randomPath, 
randomAsciiOfLength(10).getBytes(StandardCharsets.UTF_8)); - Files.setPosixFilePermissions(randomPath, Sets.newHashSet(PosixFilePermission.OWNER_EXECUTE, PosixFilePermission.OTHERS_EXECUTE, PosixFilePermission.GROUP_EXECUTE)); - break; - case KEEP: - Files.write(randomPath, randomAsciiOfLength(10).getBytes(StandardCharsets.UTF_8)); - Set posixFilePermissions = Files.getPosixFilePermissions(randomPath); - Files.setPosixFilePermissions(randomPath, posixFilePermissions); - break; - } - return CliTool.ExitStatus.OK; - } - - } - - /** - * command that changes the owner of a file if enabled - */ - class OwnerCheckFileCommand extends AbstractTestCheckFileCommand { - - public OwnerCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException { - super(baseDir, terminal, mode); - } - - @Override - public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception { - int randomInt = randomInt(paths.length - 1); - Path randomPath = paths[randomInt]; - switch (mode) { - case CHANGE: - Files.write(randomPath, randomAsciiOfLength(10).getBytes(StandardCharsets.UTF_8)); - UserPrincipal randomOwner = fs.getUserPrincipalLookupService().lookupPrincipalByName(randomAsciiOfLength(10)); - Files.setOwner(randomPath, randomOwner); - break; - case KEEP: - Files.write(randomPath, randomAsciiOfLength(10).getBytes(StandardCharsets.UTF_8)); - UserPrincipal originalOwner = Files.getOwner(randomPath); - Files.setOwner(randomPath, originalOwner); - break; - } - - return CliTool.ExitStatus.OK; - } - } - - /** - * command that changes the group of a file if enabled - */ - class GroupCheckFileCommand extends AbstractTestCheckFileCommand { - - public GroupCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException { - super(baseDir, terminal, mode); - } - - @Override - public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception { - int randomInt = randomInt(paths.length - 1); - Path randomPath = paths[randomInt]; - switch (mode) { - case CHANGE: - Files.write(randomPath, randomAsciiOfLength(10).getBytes(StandardCharsets.UTF_8)); - GroupPrincipal randomPrincipal = fs.getUserPrincipalLookupService().lookupPrincipalByGroupName(randomAsciiOfLength(10)); - Files.getFileAttributeView(randomPath, PosixFileAttributeView.class).setGroup(randomPrincipal); - break; - case KEEP: - Files.write(randomPath, randomAsciiOfLength(10).getBytes(StandardCharsets.UTF_8)); - GroupPrincipal groupPrincipal = Files.readAttributes(randomPath, PosixFileAttributes.class).group(); - Files.getFileAttributeView(randomPath, PosixFileAttributeView.class).setGroup(groupPrincipal); - break; - } - - return CliTool.ExitStatus.OK; - } - } - - /** - * A command that creates a non existing file - */ - class CreateFileCommand extends CheckFileCommand { - - private final Path pathToCreate; - - public CreateFileCommand(Terminal terminal, Path pathToCreate) { - super(terminal); - this.pathToCreate = pathToCreate; - } - - @Override - public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception { - Files.write(pathToCreate, "anything".getBytes(StandardCharsets.UTF_8)); - return CliTool.ExitStatus.OK; - } - - @Override - protected Path[] pathsForPermissionsCheck(Settings settings, Environment env) throws Exception { - return new Path[] { pathToCreate }; - } - } - - /** - * A command that deletes an existing file - */ - class DeleteFileCommand extends CheckFileCommand { - - private final Path pathToDelete; - - public DeleteFileCommand(Terminal terminal, 
Path pathToDelete) { - super(terminal); - this.pathToDelete = pathToDelete; - } - - @Override - public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception { - Files.delete(pathToDelete); - return CliTool.ExitStatus.OK; - } - - @Override - protected Path[] pathsForPermissionsCheck(Settings settings, Environment env) throws Exception { - return new Path[] {pathToDelete}; - } - } -} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CliToolTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CliToolTests.java deleted file mode 100644 index 5033914632a..00000000000 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/cli/CliToolTests.java +++ /dev/null @@ -1,365 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.cli; - -import org.apache.commons.cli.CommandLine; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.internal.InternalSettingsPreparer; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import static java.util.Collections.unmodifiableMap; -import static org.elasticsearch.common.cli.CliTool.ExitStatus.OK; -import static org.elasticsearch.common.cli.CliTool.ExitStatus.USAGE; -import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd; -import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; - -@SuppressForbidden(reason = "modifies system properties intentionally") -public class CliToolTests extends CliToolTestCase { - public void testOK() throws Exception { - Terminal terminal = new MockTerminal(); - final AtomicReference executed = new AtomicReference<>(false); - final NamedCommand cmd = new NamedCommand("cmd", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) { - executed.set(true); - return OK; - } - }; - SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd); - CliTool.ExitStatus status = tool.execute(); - assertStatus(status, OK); - assertCommandHasBeenExecuted(executed); - } - - public void testUsageError() throws Exception { - Terminal terminal = new MockTerminal(); - final AtomicReference executed = new AtomicReference<>(false); - final NamedCommand cmd = new NamedCommand("cmd", 
terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws UserError { - executed.set(true); - throw new UserError(CliTool.ExitStatus.USAGE, "bad usage"); - } - }; - SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd); - CliTool.ExitStatus status = tool.execute(); - assertStatus(status, CliTool.ExitStatus.USAGE); - assertCommandHasBeenExecuted(executed); - } - - public void testMultiCommand() throws Exception { - Terminal terminal = new MockTerminal(); - int count = randomIntBetween(2, 7); - List> executed = new ArrayList<>(count); - for (int i = 0; i < count; i++) { - executed.add(new AtomicReference<>(false)); - } - NamedCommand[] cmds = new NamedCommand[count]; - for (int i = 0; i < count; i++) { - final int index = i; - cmds[i] = new NamedCommand("cmd" + index, terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - executed.get(index).set(true); - return OK; - } - }; - } - MultiCmdTool tool = new MultiCmdTool("tool", terminal, cmds); - int cmdIndex = randomIntBetween(0, count-1); - CliTool.ExitStatus status = tool.execute("cmd" + cmdIndex); - assertThat(status, is(OK)); - for (int i = 0; i < count; i++) { - assertThat(executed.get(i).get(), is(i == cmdIndex)); - } - } - - public void testMultiCommandUnknownCommand() throws Exception { - Terminal terminal = new MockTerminal(); - int count = randomIntBetween(2, 7); - List> executed = new ArrayList<>(count); - for (int i = 0; i < count; i++) { - executed.add(new AtomicReference<>(false)); - } - NamedCommand[] cmds = new NamedCommand[count]; - for (int i = 0; i < count; i++) { - final int index = i; - cmds[i] = new NamedCommand("cmd" + index, terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - executed.get(index).set(true); - return OK; - } - }; - } - MultiCmdTool tool = new MultiCmdTool("tool", terminal, cmds); - CliTool.ExitStatus status = tool.execute("cmd" + count); // "cmd" + count doesn't exist - assertThat(status, is(CliTool.ExitStatus.USAGE)); - for (int i = 0; i < count; i++) { - assertThat(executed.get(i).get(), is(false)); - } - } - - public void testSingleCommandToolHelp() throws Exception { - CaptureOutputTerminal terminal = new CaptureOutputTerminal(); - final AtomicReference executed = new AtomicReference<>(false); - final NamedCommand cmd = new NamedCommand("cmd1", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - executed.set(true); - throw new IOException("io error"); - } - }; - SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd); - CliTool.ExitStatus status = tool.execute(args("-h")); - assertStatus(status, CliTool.ExitStatus.OK_AND_EXIT); - assertThat(terminal.getTerminalOutput(), hasSize(3)); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("cmd1 help"))); - } - - public void testMultiCommandToolHelp() throws Exception { - CaptureOutputTerminal terminal = new CaptureOutputTerminal(); - NamedCommand[] cmds = new NamedCommand[2]; - cmds[0] = new NamedCommand("cmd0", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - return OK; - } - }; - cmds[1] = new NamedCommand("cmd1", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - return OK; - } - }; - MultiCmdTool tool = new MultiCmdTool("tool", terminal, cmds); - 
CliTool.ExitStatus status = tool.execute(args("-h")); - assertStatus(status, CliTool.ExitStatus.OK_AND_EXIT); - assertThat(terminal.getTerminalOutput(), hasSize(3)); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("tool help"))); - } - - public void testMultiCommandCmdHelp() throws Exception { - CaptureOutputTerminal terminal = new CaptureOutputTerminal(); - NamedCommand[] cmds = new NamedCommand[2]; - cmds[0] = new NamedCommand("cmd0", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - return OK; - } - }; - cmds[1] = new NamedCommand("cmd1", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - return OK; - } - }; - MultiCmdTool tool = new MultiCmdTool("tool", terminal, cmds); - CliTool.ExitStatus status = tool.execute(args("cmd1 -h")); - assertStatus(status, CliTool.ExitStatus.OK_AND_EXIT); - assertThat(terminal.getTerminalOutput(), hasSize(3)); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("cmd1 help"))); - } - - public void testNonUserErrorPropagates() throws Exception { - CaptureOutputTerminal terminal = new CaptureOutputTerminal(); - NamedCommand cmd = new NamedCommand("cmd", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { - throw new IOException("error message"); - } - }; - SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd); - IOException e = expectThrows(IOException.class, () -> { - tool.execute(); - }); - assertEquals("error message", e.getMessage()); - } - - public void testMultipleLaunch() throws Exception { - Terminal terminal = new MockTerminal(); - final AtomicReference executed = new AtomicReference<>(false); - final NamedCommand cmd = new NamedCommand("cmd", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) { - executed.set(true); - return OK; - } - }; - SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd); - tool.parse("cmd", Strings.splitStringByCommaToArray("--verbose")); - tool.parse("cmd", Strings.splitStringByCommaToArray("--silent")); - tool.parse("cmd", Strings.splitStringByCommaToArray("--help")); - } - - public void testPromptForSetting() throws Exception { - final AtomicInteger counter = new AtomicInteger(); - final AtomicReference promptedSecretValue = new AtomicReference<>(null); - final AtomicReference promptedTextValue = new AtomicReference<>(null); - final Terminal terminal = new MockTerminal() { - @Override - public char[] readSecret(String text) { - counter.incrementAndGet(); - return "changeit".toCharArray(); - } - - @Override - public String readText(String text) { - counter.incrementAndGet(); - return "replaced"; - } - }; - final NamedCommand cmd = new NamedCommand("noop", terminal) { - @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) { - promptedSecretValue.set(settings.get("foo.password")); - promptedTextValue.set(settings.get("replace")); - return OK; - } - }; - - System.setProperty("es.foo.password", InternalSettingsPreparer.SECRET_PROMPT_VALUE); - System.setProperty("es.replace", InternalSettingsPreparer.TEXT_PROMPT_VALUE); - try { - new SingleCmdTool("tool", terminal, cmd).execute(); - } finally { - System.clearProperty("es.foo.password"); - System.clearProperty("es.replace"); - } - - assertThat(counter.intValue(), is(2)); - assertThat(promptedSecretValue.get(), is("changeit")); - assertThat(promptedTextValue.get(), 
is("replaced")); - } - - public void testStopAtNonOptionParsing() throws Exception { - final CliToolConfig.Cmd lenientCommand = cmd("lenient", CliTool.Command.Exit.class).stopAtNonOption(true).build(); - final CliToolConfig.Cmd strictCommand = cmd("strict", CliTool.Command.Exit.class).stopAtNonOption(false).build(); - final CliToolConfig config = CliToolConfig.config("elasticsearch", CliTool.class).cmds(lenientCommand, strictCommand).build(); - - final CaptureOutputTerminal terminal = new CaptureOutputTerminal(); - final CliTool cliTool = new CliTool(config, terminal) { - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - return new NamedCommand(cmdName, terminal) { - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - return OK; - } - }; - } - }; - - // known parameters, no error - assertStatus(cliTool.execute(args("lenient --verbose")), OK); - assertStatus(cliTool.execute(args("lenient -v")), OK); - - // unknown parameters, no error - assertStatus(cliTool.execute(args("lenient --unknown")), OK); - assertStatus(cliTool.execute(args("lenient -u")), OK); - - // unknown parameters, error - assertStatus(cliTool.execute(args("strict --unknown")), USAGE); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("Unrecognized option: --unknown"))); - - terminal.getTerminalOutput().clear(); - assertStatus(cliTool.execute(args("strict -u")), USAGE); - assertThat(terminal.getTerminalOutput(), hasItem(containsString("Unrecognized option: -u"))); - } - - private void assertStatus(CliTool.ExitStatus status, CliTool.ExitStatus expectedStatus) { - assertThat(status, is(expectedStatus)); - } - - private void assertCommandHasBeenExecuted(AtomicReference executed) { - assertThat("Expected command atomic reference counter to be set to true", executed.get(), is(Boolean.TRUE)); - } - - private static class SingleCmdTool extends CliTool { - - private final Command command; - - private SingleCmdTool(String name, Terminal terminal, NamedCommand command) { - super(CliToolConfig.config(name, SingleCmdTool.class) - .cmds(cmd(command.name, command.getClass())) - .build(), terminal); - this.command = command; - } - - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - return command; - } - } - - private static class MultiCmdTool extends CliTool { - - private final Map commands; - - private MultiCmdTool(String name, Terminal terminal, NamedCommand... commands) { - super(CliToolConfig.config(name, MultiCmdTool.class) - .cmds(cmds(commands)) - .build(), terminal); - Map commandByName = new HashMap<>(); - for (int i = 0; i < commands.length; i++) { - commandByName.put(commands[i].name, commands[i]); - } - this.commands = unmodifiableMap(commandByName); - } - - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - return commands.get(cmdName); - } - - private static CliToolConfig.Cmd[] cmds(NamedCommand... 
commands) { - CliToolConfig.Cmd[] cmds = new CliToolConfig.Cmd[commands.length]; - for (int i = 0; i < commands.length; i++) { - cmds[i] = cmd(commands[i].name, commands[i].getClass()).build(); - } - return cmds; - } - } - - private static abstract class NamedCommand extends CliTool.Command { - - private final String name; - - private NamedCommand(String name, Terminal terminal) { - super(terminal); - this.name = name; - } - } -} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index 66dfa67ccbd..fb69c817f3a 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -44,10 +44,8 @@ import java.util.zip.ZipOutputStream; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolTestCase; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.UserError; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cli.UserError; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; @@ -116,10 +114,9 @@ public class InstallPluginCommandTests extends ESTestCase { return writeZip(structure, "elasticsearch"); } - static CliToolTestCase.CaptureOutputTerminal installPlugin(String pluginUrl, Environment env) throws Exception { - CliToolTestCase.CaptureOutputTerminal terminal = new CliToolTestCase.CaptureOutputTerminal(Terminal.Verbosity.NORMAL); - CliTool.ExitStatus status = new InstallPluginCommand(terminal, pluginUrl, true).execute(env.settings(), env); - assertEquals(CliTool.ExitStatus.OK, status); + static MockTerminal installPlugin(String pluginUrl, Environment env) throws Exception { + MockTerminal terminal = new MockTerminal(); + new InstallPluginCommand(env).execute(terminal, pluginUrl, true); return terminal; } @@ -469,6 +466,18 @@ public class InstallPluginCommandTests extends ESTestCase { assertInstallCleaned(env); } + public void testZipRelativeOutsideEntryName() throws Exception { + Path zip = createTempDir().resolve("broken.zip"); + try (ZipOutputStream stream = new ZipOutputStream(Files.newOutputStream(zip))) { + stream.putNextEntry(new ZipEntry("elasticsearch/../blah")); + } + String pluginZip = zip.toUri().toURL().toString(); + IOException e = expectThrows(IOException.class, () -> { + installPlugin(pluginZip, createEnv()); + }); + assertTrue(e.getMessage(), e.getMessage().contains("resolving outside of plugin directory")); + } + // TODO: test batch flag? // TODO: test checksum (need maven/official below) // TODO: test maven, official, and staging install...need tests with fixtures... 
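The new testZipRelativeOutsideEntryName test above pins down a zip-slip guard: an archive entry such as elasticsearch/../blah must be rejected before anything is extracted. A minimal sketch of that kind of check, with an illustrative resolveEntry helper rather than the actual InstallPluginCommand code:

```java
import java.io.IOException;
import java.nio.file.Path;
import java.util.zip.ZipEntry;

// Sketch of a zip-slip guard: resolve each archive entry against the plugin
// directory and reject anything that normalizes to a path outside of it.
class ZipSlipGuard {
    static Path resolveEntry(Path pluginDir, ZipEntry entry) throws IOException {
        Path target = pluginDir.resolve(entry.getName()).normalize();
        if (target.startsWith(pluginDir.normalize()) == false) {
            // message shaped like the one the test above asserts on
            throw new IOException("Zip contains entry name '" + entry.getName()
                + "' resolving outside of plugin directory " + pluginDir);
        }
        return target;
    }
}
```

Resolving and normalizing before the startsWith check is the important part; comparing the raw entry name against a prefix would let elasticsearch/../blah slip through.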
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java index c68e207c0c3..cbdd031dea1 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java @@ -22,13 +22,10 @@ package org.elasticsearch.plugins; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.util.Collections; -import java.util.List; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolTestCase; -import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; @@ -45,10 +42,11 @@ public class ListPluginsCommandTests extends ESTestCase { return new Environment(settings); } - static CliToolTestCase.CaptureOutputTerminal listPlugins(Environment env) throws Exception { - CliToolTestCase.CaptureOutputTerminal terminal = new CliToolTestCase.CaptureOutputTerminal(Terminal.Verbosity.NORMAL); - CliTool.ExitStatus status = new ListPluginsCommand(terminal).execute(env.settings(), env); - assertEquals(CliTool.ExitStatus.OK, status); + static MockTerminal listPlugins(Environment env) throws Exception { + MockTerminal terminal = new MockTerminal(); + String[] args = {}; + int status = new ListPluginsCommand(env).main(args, terminal); + assertEquals(ExitCodes.OK, status); return terminal; } @@ -62,29 +60,24 @@ public class ListPluginsCommandTests extends ESTestCase { } public void testNoPlugins() throws Exception { - CliToolTestCase.CaptureOutputTerminal terminal = listPlugins(createEnv()); - List<String> lines = terminal.getTerminalOutput(); - assertEquals(0, lines.size()); + MockTerminal terminal = listPlugins(createEnv()); + assertTrue(terminal.getOutput(), terminal.getOutput().isEmpty()); } public void testOnePlugin() throws Exception { Environment env = createEnv(); Files.createDirectory(env.pluginsFile().resolve("fake")); - CliToolTestCase.CaptureOutputTerminal terminal = listPlugins(env); - List<String> lines = terminal.getTerminalOutput(); - assertEquals(1, lines.size()); - assertTrue(lines.get(0).contains("fake")); + MockTerminal terminal = listPlugins(env); + assertTrue(terminal.getOutput(), terminal.getOutput().contains("fake")); } public void testTwoPlugins() throws Exception { Environment env = createEnv(); Files.createDirectory(env.pluginsFile().resolve("fake1")); Files.createDirectory(env.pluginsFile().resolve("fake2")); - CliToolTestCase.CaptureOutputTerminal terminal = listPlugins(env); - List<String> lines = terminal.getTerminalOutput(); - assertEquals(2, lines.size()); - Collections.sort(lines); - assertTrue(lines.get(0).contains("fake1")); - assertTrue(lines.get(1).contains("fake2")); + MockTerminal terminal = listPlugins(env); + String output = terminal.getOutput(); + assertTrue(output, output.contains("fake1")); + assertTrue(output, output.contains("fake2")); } } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java index de1486a3bc2..466f7d05cd1 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java +++
b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.plugins; -import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.test.ESTestCase; import java.nio.file.Path; @@ -31,7 +31,7 @@ import java.util.List; /** Tests plugin manager security check */ public class PluginSecurityTests extends ESTestCase { - + /** Test that we can parse the set of permissions correctly for a simple policy */ public void testParsePermissions() throws Exception { assumeTrue("test cannot run with security manager enabled", System.getSecurityManager() == null); @@ -42,7 +42,7 @@ public class PluginSecurityTests extends ESTestCase { PermissionCollection actual = PluginSecurity.parsePermissions(Terminal.DEFAULT, testFile, scratch); assertEquals(expected, actual); } - + /** Test that we can parse the set of permissions correctly for a complex policy */ public void testParseTwoPermissions() throws Exception { assumeTrue("test cannot run with security manager enabled", System.getSecurityManager() == null); @@ -54,12 +54,12 @@ public class PluginSecurityTests extends ESTestCase { PermissionCollection actual = PluginSecurity.parsePermissions(Terminal.DEFAULT, testFile, scratch); assertEquals(expected, actual); } - + /** Test that we can format some simple permissions properly */ public void testFormatSimplePermission() throws Exception { assertEquals("java.lang.RuntimePermission queuePrintJob", PluginSecurity.formatPermission(new RuntimePermission("queuePrintJob"))); } - + /** Test that we can format an unresolved permission properly */ public void testFormatUnresolvedPermission() throws Exception { assumeTrue("test cannot run with security manager enabled", System.getSecurityManager() == null); @@ -70,7 +70,7 @@ public class PluginSecurityTests extends ESTestCase { assertEquals(1, permissions.size()); assertEquals("org.fake.FakePermission fakeName", PluginSecurity.formatPermission(permissions.get(0))); } - + /** no guaranteed equals on these classes, we assert they contain the same set */ private void assertEquals(PermissionCollection expected, PermissionCollection actual) { assertEquals(asSet(Collections.list(expected.elements())), asSet(Collections.list(actual.elements()))); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java index 10fbc3c2696..d9d5661b834 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java @@ -25,10 +25,8 @@ import java.nio.file.Files; import java.nio.file.Path; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolTestCase; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.UserError; +import org.elasticsearch.cli.UserError; +import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; @@ -48,10 +46,9 @@ public class RemovePluginCommandTests extends ESTestCase { return new Environment(settings); } - static CliToolTestCase.CaptureOutputTerminal removePlugin(String name, Environment env) throws Exception { - CliToolTestCase.CaptureOutputTerminal terminal = new 
CliToolTestCase.CaptureOutputTerminal(Terminal.Verbosity.VERBOSE); - CliTool.ExitStatus status = new RemovePluginCommand(terminal, name).execute(env.settings(), env); - assertEquals(CliTool.ExitStatus.OK, status); + static MockTerminal removePlugin(String name, Environment env) throws Exception { + MockTerminal terminal = new MockTerminal(); + new RemovePluginCommand(env).execute(terminal, name); return terminal; } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java index 9c68ea196aa..63c09890acc 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -23,7 +23,7 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -66,14 +66,14 @@ public class TribeUnitTests extends ESTestCase { .put(baseSettings) .put("cluster.name", "tribe1") .put("node.name", "tribe1_node") - .put(InternalClusterService.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) + .put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) .build()).start(); tribe2 = new TribeClientNode( Settings.builder() .put(baseSettings) .put("cluster.name", "tribe2") .put("node.name", "tribe2_node") - .put(InternalClusterService.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) + .put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) .build()).start(); } diff --git a/qa/smoke-test-ingest-disabled/build.gradle b/qa/smoke-test-ingest-disabled/build.gradle index ca71697a7b4..09b2d1409a1 100644 --- a/qa/smoke-test-ingest-disabled/build.gradle +++ b/qa/smoke-test-ingest-disabled/build.gradle @@ -21,6 +21,6 @@ apply plugin: 'elasticsearch.rest-test' integTest { cluster { - systemProperty 'es.node.ingest', 'false' + setting 'node.ingest', 'false' } } diff --git a/qa/smoke-test-reindex-with-groovy/build.gradle b/qa/smoke-test-reindex-with-groovy/build.gradle index a42f5e708a2..c4b462ce45a 100644 --- a/qa/smoke-test-reindex-with-groovy/build.gradle +++ b/qa/smoke-test-reindex-with-groovy/build.gradle @@ -21,6 +21,6 @@ apply plugin: 'elasticsearch.rest-test' integTest { cluster { - systemProperty 'es.script.inline', 'true' + setting 'script.inline', 'true' } } diff --git a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/10_script.yaml b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/10_script.yaml index d37a94deea7..aa553a5c9dc 100644 --- a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/10_script.yaml +++ b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/10_script.yaml @@ -395,3 +395,51 @@ match: user: otherkimchy - match: { hits.total: 1 } + +--- +"Change index to write to a different index": + - do: + index: + index: twitter + type: tweet + id: 1 + body: { "user": "kimchy" } + - do: + index: + index: twitter + type: tweet + id: 2 + body: { "user": "another" } + - do: + indices.refresh: {} + + - do: + reindex: + refresh: true + body: + source: + index: twitter + dest: + 
index: new_twitter + script: + inline: if (ctx._source.user == "kimchy") {ctx._index = 'other_new_twitter'} + - match: {created: 2} + - match: {noops: 0} + + - do: + search: + index: other_new_twitter + body: + query: + match: + user: kimchy + - match: { hits.total: 1 } + + - do: + search: + index: new_twitter + body: + query: + match: + user: another + - match: { hits.total: 1 } diff --git a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/30_timeout.yaml b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/30_timeout.yaml new file mode 100644 index 00000000000..ddd22246717 --- /dev/null +++ b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/30_timeout.yaml @@ -0,0 +1,29 @@ +--- +"Timeout": + - do: + index: + index: twitter + type: tweet + id: 1 + body: { "user": "kimchy" } + - do: + indices.refresh: {} + + - do: + catch: request_timeout + reindex: + refresh: true + body: + source: + index: twitter + timeout: 10 + query: + script: + # Sleep 100x longer than the timeout. That should cause a timeout! + # Return true causes the document to try to be collected which is what actually triggers the timeout. + script: sleep(1000); return true + dest: + index: new_twitter + - is_true: timed_out + - match: {created: 0} + - match: {noops: 0} diff --git a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/40_search_failures.yaml b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/40_search_failures.yaml new file mode 100644 index 00000000000..50442c2d51b --- /dev/null +++ b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/40_search_failures.yaml @@ -0,0 +1,34 @@ +--- +"Response format search failures": + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + catch: request + reindex: + body: + source: + index: source + query: + script: + script: 1/0 # Divide by 0 to cause a search time exception + dest: + index: dest + - match: {created: 0} + - match: {updated: 0} + - match: {version_conflicts: 0} + - match: {batches: 0} + - is_true: failures.0.shard + - match: {failures.0.index: source} + - is_true: failures.0.node + - match: {failures.0.reason.type: script_exception} + - match: {failures.0.reason.reason: "failed to run inline script [1/0] using lang [groovy]"} + - match: {failures.0.reason.caused_by.type: arithmetic_exception} + - match: {failures.0.reason.caused_by.reason: Division by zero} + - is_true: took diff --git a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update-by-query/30_timeout.yaml b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update-by-query/30_timeout.yaml new file mode 100644 index 00000000000..2a291bf0541 --- /dev/null +++ b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update-by-query/30_timeout.yaml @@ -0,0 +1,26 @@ +--- +"Timeout": + - do: + index: + index: twitter + type: tweet + id: 1 + body: { "user": "kimchy" } + - do: + indices.refresh: {} + + - do: + catch: request_timeout + update-by-query: + index: twitter + refresh: true + search_timeout: 10ms + body: + query: + script: + # Sleep 100x longer than the timeout. That should cause a timeout! + # Return true causes the document to try to be collected which is what actually triggers the timeout. 
+ script: sleep(1000); return true + - is_true: timed_out + - match: {updated: 0} + - match: {noops: 0} diff --git a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update-by-query/40_search_failure.yaml b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update-by-query/40_search_failure.yaml new file mode 100644 index 00000000000..8f89409c586 --- /dev/null +++ b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update-by-query/40_search_failure.yaml @@ -0,0 +1,30 @@ +--- +"Response format search failures": + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + catch: request + update-by-query: + index: source + body: + query: + script: + script: 1/0 # Divide by 0 to cause a search time exception + - match: {updated: 0} + - match: {version_conflicts: 0} + - match: {batches: 0} + - is_true: failures.0.shard + - match: {failures.0.index: source} + - is_true: failures.0.node + - match: {failures.0.reason.type: script_exception} + - match: {failures.0.reason.reason: "failed to run inline script [1/0] using lang [groovy]"} + - match: {failures.0.reason.caused_by.type: arithmetic_exception} + - match: {failures.0.reason.caused_by.reason: Division by zero} + - is_true: took diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index d0be6f13946..1df5b7dc402 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -26,8 +26,17 @@ String testScripts = '*.bats' String testCommand = "cd \$TESTROOT && sudo bats --tap \$BATS/$testScripts" String smokeTestCommand = 'echo I work' List representativeBoxes = ['ubuntu-1404', 'centos-7'] -List boxes = representativeBoxes + ['ubuntu-1204', 'ubuntu-1504', - 'debian-8', 'centos-6', 'oel-7', 'fedora-22', 'opensuse-13', 'sles-12'] +List boxes = representativeBoxes + [ + 'ubuntu-1204', + 'ubuntu-1504', + 'debian-8', + 'centos-6', + 'oel-6', + 'oel-7', + 'fedora-22', + 'opensuse-13', + 'sles-12' +] /* The version of elasticsearch that we upgrade *from* as part of testing * upgrades. 
*/ diff --git a/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats b/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats index 8f55b1eb78c..552c404a3d6 120000 --- a/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats @@ -1 +1 @@ -plugin_test_cases.bash \ No newline at end of file +module_and_plugin_test_cases.bash \ No newline at end of file diff --git a/qa/vagrant/src/test/resources/packaging/scripts/50_modules_and_plugins.bats b/qa/vagrant/src/test/resources/packaging/scripts/50_modules_and_plugins.bats new file mode 120000 index 00000000000..552c404a3d6 --- /dev/null +++ b/qa/vagrant/src/test/resources/packaging/scripts/50_modules_and_plugins.bats @@ -0,0 +1 @@ +module_and_plugin_test_cases.bash \ No newline at end of file diff --git a/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats b/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats deleted file mode 120000 index 8f55b1eb78c..00000000000 --- a/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats +++ /dev/null @@ -1 +0,0 @@ -plugin_test_cases.bash \ No newline at end of file diff --git a/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash similarity index 81% rename from qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash rename to qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash index c81d850d94d..e92c4b62b76 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash @@ -33,13 +33,14 @@ ################################## # Common test cases for both tar and rpm/deb based plugin tests ################################## -# This file is symlinked to both 25_tar_plugins.bats and 50_plugins.bats so its +# This file is symlinked to both 25_tar_plugins.bats and 50_modules_and_plugins.bats so it's # executed twice - once to test plugins using the tar distribution and once to # test plugins using the rpm distribution or the deb distribution, whichever the # system uses.
# Load test utilities load packaging_test_utils +load modules load plugins setup() { @@ -102,7 +103,7 @@ fi echo "CONF_FILE=$CONF_FILE" >> /etc/sysconfig/elasticsearch; fi - run_elasticsearch_service 1 -Des.default.config="$CONF_FILE" + run_elasticsearch_service 1 -Ees.default.config="$CONF_FILE" # remove settings again otherwise cleaning up before next testrun will fail if is_dpkg ; then @@ -219,16 +220,41 @@ fi install_and_check_plugin discovery ec2 aws-java-sdk-core-*.jar } -@test "[$GROUP] install lang-expression plugin" { - install_and_check_plugin lang expression +@test "[$GROUP] install ingest-attachment plugin" { + # we specify the version on the poi-3.13.jar so that the test does + # not spuriously pass if the jar is missing but the other poi jars + # are present + install_and_check_plugin ingest attachment bcprov-jdk15on-*.jar tika-core-*.jar pdfbox-*.jar poi-3.13.jar } -@test "[$GROUP] install lang-groovy plugin" { - install_and_check_plugin lang groovy +@test "[$GROUP] install ingest-geoip plugin" { + install_and_check_plugin ingest geoip geoip2-*.jar jackson-annotations-*.jar jackson-databind-*.jar maxmind-db-*.jar } -@test "[$GROUP] install lang-painless plugin" { - install_and_check_plugin lang painless +@test "[$GROUP] check ingest-grok module" { + check_module ingest-grok jcodings-*.jar joni-*.jar +} + +@test "[$GROUP] check lang-expression module" { + # we specify the version on the asm-5.0.4.jar so that the test does + # not spuriously pass if the jar is missing but the other asm jars + # are present + check_secure_module lang-expression antlr4-runtime-*.jar asm-5.0.4.jar asm-commons-*.jar asm-tree-*.jar lucene-expressions-*.jar +} + +@test "[$GROUP] check lang-groovy module" { + check_secure_module lang-groovy groovy-*-indy.jar +} + +@test "[$GROUP] check lang-mustache module" { + check_secure_module lang-mustache compiler-*.jar +} + +@test "[$GROUP] check lang-painless module" { + # we specify the version on the asm-5.0.4.jar so that the test does + # not spuriously pass if the jar is missing but the other asm jars + # are present + check_secure_module lang-painless antlr4-runtime-*.jar asm-5.0.4.jar asm-commons-*.jar asm-tree-*.jar } @test "[$GROUP] install javascript plugin" { @@ -247,6 +273,14 @@ fi install_and_check_plugin mapper murmur3 } +@test "[$GROUP] check reindex module" { + check_module reindex +} + +@test "[$GROUP] install repository-hdfs plugin" { + install_and_check_plugin repository hdfs hadoop-client-*.jar hadoop-common-*.jar hadoop-annotations-*.jar hadoop-auth-*.jar hadoop-hdfs-*.jar htrace-core-*.jar guava-*.jar protobuf-java-*.jar commons-logging-*.jar commons-cli-*.jar commons-collections-*.jar commons-configuration-*.jar commons-io-*.jar commons-lang-*.jar servlet-api-*.jar slf4j-api-*.jar +} + @test "[$GROUP] install size mapper plugin" { install_and_check_plugin mapper size } @@ -264,7 +298,7 @@ fi } @test "[$GROUP] check the installed plugins can be listed with 'plugins list' and result matches the list of plugins in plugins pom" { - "$ESHOME/bin/elasticsearch-plugin" list | tail -n +2 | sed 's/^......//' > /tmp/installed + "$ESHOME/bin/elasticsearch-plugin" list > /tmp/installed compare_plugins_list "/tmp/installed" "'plugins list'" } @@ -321,16 +355,12 @@ fi remove_plugin discovery-ec2 } -@test "[$GROUP] remove lang-expression plugin" { - remove_plugin lang-expression +@test "[$GROUP] remove ingest-attachment plugin" { + remove_plugin ingest-attachment } -@test "[$GROUP] remove lang-groovy plugin" { - remove_plugin lang-groovy -} - -@test 
"[$GROUP] remove lang-painless plugin" { - remove_plugin lang-painless +@test "[$GROUP] remove ingest-geoip plugin" { + remove_plugin ingest-geoip } @test "[$GROUP] remove javascript plugin" { @@ -357,12 +387,12 @@ fi remove_plugin repository-azure } -@test "[$GROUP] remove repository-s3 plugin" { - remove_plugin repository-s3 +@test "[$GROUP] remove repository-hdfs plugin" { + remove_plugin repository-hdfs } -@test "[$GROUP] remove site example plugin" { - remove_plugin site-example +@test "[$GROUP] remove repository-s3 plugin" { + remove_plugin repository-s3 } @test "[$GROUP] remove store-smb plugin" { @@ -393,14 +423,14 @@ fi local loglines=$(cat /tmp/plugin-cli-output | wc -l) if [ "$GROUP" == "TAR PLUGINS" ]; then # tar extraction does not create the plugins directory so the plugin tool will print an additional line that the directory will be created - [ "$loglines" -eq "7" ] || { - echo "Expected 7 lines but the output was:" + [ "$loglines" -eq "3" ] || { + echo "Expected 3 lines but the output was:" cat /tmp/plugin-cli-output false } else - [ "$loglines" -eq "6" ] || { - echo "Expected 6 lines but the output was:" + [ "$loglines" -eq "2" ] || { + echo "Expected 2 lines but the output was:" cat /tmp/plugin-cli-output false } @@ -411,14 +441,14 @@ fi sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" -Des.logger.level=DEBUG > /tmp/plugin-cli-output local loglines=$(cat /tmp/plugin-cli-output | wc -l) if [ "$GROUP" == "TAR PLUGINS" ]; then - [ "$loglines" -gt "7" ] || { - echo "Expected more than 7 lines but the output was:" + [ "$loglines" -gt "3" ] || { + echo "Expected more than 3 lines but the output was:" cat /tmp/plugin-cli-output false } else - [ "$loglines" -gt "6" ] || { - echo "Expected more than 6 lines but the output was:" + [ "$loglines" -gt "2" ] || { + echo "Expected more than 2 lines but the output was:" cat /tmp/plugin-cli-output false } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/modules.bash b/qa/vagrant/src/test/resources/packaging/scripts/modules.bash new file mode 100644 index 00000000000..bd6da680da9 --- /dev/null +++ b/qa/vagrant/src/test/resources/packaging/scripts/modules.bash @@ -0,0 +1,44 @@ +#!/bin/sh + +# This file contains some utilities to test the elasticsearch scripts, +# the .deb/.rpm packages and the SysV/Systemd scripts. + +# WARNING: This testing file must be executed as root and can +# dramatically change your system. It removes the 'elasticsearch' +# user/group and also many directories. Do not execute this file +# unless you know exactly what you are doing. + +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +check_module() { + local name=$1 + shift + + assert_module_or_plugin_directory "$ESMODULES/$name" + + for file in "$@"; do + assert_module_or_plugin_file "$ESMODULES/$name/$file" + done + + assert_module_or_plugin_file "$ESMODULES/$name/$name-*.jar" + assert_module_or_plugin_file "$ESMODULES/$name/plugin-descriptor.properties" +} + +check_secure_module() { + check_module "$@" plugin-security.policy +} \ No newline at end of file diff --git a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash index f48532cb3f3..bcc0fd66f2e 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash @@ -30,6 +30,7 @@ export_elasticsearch_paths() { export ESHOME="/usr/share/elasticsearch" export ESPLUGINS="$ESHOME/plugins" + export ESMODULES="$ESHOME/modules" export ESCONFIG="/etc/elasticsearch" export ESSCRIPTS="$ESCONFIG/scripts" export ESDATA="/var/lib/elasticsearch" @@ -81,7 +82,8 @@ verify_package_installation() { assert_file "$ESSCRIPTS" d root elasticsearch 750 assert_file "$ESDATA" d elasticsearch elasticsearch 755 assert_file "$ESLOG" d elasticsearch elasticsearch 755 - assert_file "$ESPLUGINS" d elasticsearch elasticsearch 755 + assert_file "$ESPLUGINS" d root root 755 + assert_file "$ESMODULES" d root root 755 assert_file "$ESPIDDIR" d elasticsearch elasticsearch 755 assert_file "$ESHOME/NOTICE.txt" f root root 644 assert_file "$ESHOME/README.textile" f root root 644 diff --git a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash index 11961e06921..cb18363d60a 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash @@ -179,6 +179,32 @@ assert_file() { fi } +assert_module_or_plugin_directory() { + local directory=$1 + shift + + #owner, group and permissions vary depending on how es was installed + #just make sure that everything is the same as $ESHOME, which was properly set up during install + config_user=$(find "$ESHOME" -maxdepth 0 -printf "%u") + config_owner=$(find "$ESHOME" -maxdepth 0 -printf "%g") + # directories should use the user file-creation mask + config_privileges=$(executable_privileges_for_user_from_umask $ESPLUGIN_COMMAND_USER) + + assert_file $directory d $config_user $config_owner $(printf "%o" $config_privileges) +} + +assert_module_or_plugin_file() { + local file=$1 + shift + + assert_file_exist "$(readlink -m $file)" + + # module and plugin files should not be executable and otherwise use the user + # file-creation mask + expected_file_privileges=$(file_privileges_for_user_from_umask $ESPLUGIN_COMMAND_USER) + assert_file $file f $config_user $config_owner $(printf "%o" $expected_file_privileges) +} + assert_output() { echo "$output" | grep -E "$1" } @@ -285,6 +311,9 @@ run_elasticsearch_service() { if [ -f "/tmp/elasticsearch/bin/elasticsearch" ]; then if [ -z "$CONF_DIR" ]; then local CONF_DIR="" + local ES_PATH_CONF="" + else + local ES_PATH_CONF="-Ees.path.conf=$CONF_DIR" fi # we must capture the exit code to compare so we don't want to start as background process in case we expect something other than 0 local background="" @@ -303,7 +332,7 @@ run_elasticsearch_service() { # This line is attempting to emulate the on login behavior of /usr/share/upstart/sessions/jayatana.conf [ -f /usr/share/java/jayatanaag.jar
] && export JAVA_TOOL_OPTIONS="-javaagent:/usr/share/java/jayatanaag.jar" # And now we can start Elasticsearch normally, in the background (-d) and with a pidfile (-p). -$timeoutCommand/tmp/elasticsearch/bin/elasticsearch $background -p /tmp/elasticsearch/elasticsearch.pid -Des.path.conf=$CONF_DIR $commandLineArgs +$timeoutCommand/tmp/elasticsearch/bin/elasticsearch $background -p /tmp/elasticsearch/elasticsearch.pid $ES_PATH_CONF $commandLineArgs BASH [ "$status" -eq "$expectedStatus" ] elif is_systemd; then @@ -444,3 +473,19 @@ install_script() { echo "Installing $script to $ESSCRIPTS" cp $script $ESSCRIPTS } + +# permissions from the user umask with the executable bit set +executable_privileges_for_user_from_umask() { + local user=$1 + shift + + echo $((0777 & ~$(sudo -E -u $user sh -c umask) | 0111)) +} + +# permissions from the user umask without the executable bit set +file_privileges_for_user_from_umask() { + local user=$1 + shift + + echo $((0777 & ~$(sudo -E -u $user sh -c umask) & ~0111)) +} diff --git a/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash b/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash index 8a32c982af1..925beaade09 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash @@ -32,7 +32,7 @@ install_plugin() { assert_file_exist "$path" - sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install "file://$path" + sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install -batch "file://$path" assert_file_exist "$ESPLUGINS/$name" assert_file_exist "$ESPLUGINS/$name/plugin-descriptor.properties" @@ -91,16 +91,13 @@ install_jvm_example() { #just make sure that everything is the same as $CONFIG_DIR, which was properly set up during install config_user=$(find "$ESCONFIG" -maxdepth 0 -printf "%u") config_owner=$(find "$ESCONFIG" -maxdepth 0 -printf "%g") - config_privileges=$(find "$ESCONFIG" -maxdepth 0 -printf "%m") - assert_file "$ESCONFIG/jvm-example" d $config_user $config_owner $config_privileges - #the original file has no execute permissions and that must not change, but all other permissions - #need to be inherited from the parent config dir. We check this by applying the 111 mask to the config dir privileges. - for i in `seq 0 2`; do - current_perm_dir=${config_privileges:$i:1} - final_perm=$(($current_perm_dir & ~1)) - expected_file_privileges+=$final_perm - done - assert_file "$ESCONFIG/jvm-example/example.yaml" f $config_user $config_owner $expected_file_privileges + # directories should use the user file-creation mask + config_privileges=$(executable_privileges_for_user_from_umask $ESPLUGIN_COMMAND_USER) + assert_file "$ESCONFIG/jvm-example" d $config_user $config_owner $(printf "%o" $config_privileges) + # config files should not be executable and otherwise use the user + # file-creation mask + expected_file_privileges=$(file_privileges_for_user_from_umask $ESPLUGIN_COMMAND_USER) + assert_file "$ESCONFIG/jvm-example/example.yaml" f $config_user $config_owner $(printf "%o" $expected_file_privileges) echo "Running jvm-example's bin script...."
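The two umask helpers added to packaging_test_utils.bash above encode one rule: expected directory permissions are the octal complement of the invoking user's umask with the execute bits forced on, and expected plain-file permissions are the same complement with the execute bits stripped. A worked example of that arithmetic, assuming a typical umask of 022 (the value is an assumption for illustration):

```java
public class UmaskPermissions {
    public static void main(String[] args) {
        int umask = 0022; // assumed example umask
        // executable_privileges_for_user_from_umask: 0777 & ~umask | 0111
        int dirPerms = (0777 & ~umask) | 0111;
        // file_privileges_for_user_from_umask: 0777 & ~umask & ~0111
        int filePerms = (0777 & ~umask) & ~0111;
        System.out.println(Integer.toOctalString(dirPerms));  // prints 755
        System.out.println(Integer.toOctalString(filePerms)); // prints 644
    }
}
```

This is also why the assertions above wrap the helper output in printf "%o": the shell arithmetic produces a decimal number, while assert_file compares octal permission strings.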
"$ESHOME/bin/jvm-example/test" | grep test @@ -136,11 +133,14 @@ install_and_check_plugin() { fi install_jvm_plugin $fullName "$(readlink -m $fullName-*.zip)" + + assert_module_or_plugin_directory "$ESPLUGINS/$fullName" + if [ $prefix == 'analysis' ]; then - assert_file_exist "$(readlink -m $ESPLUGINS/$fullName/lucene-analyzers-$name-*.jar)" + assert_module_or_plugin_file "$ESPLUGINS/$fullName/lucene-analyzers-$name-*.jar" fi for file in "$@"; do - assert_file_exist "$(readlink -m $ESPLUGINS/$fullName/$file)" + assert_module_or_plugin_file "$ESPLUGINS/$fullName/$file" done } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash index f9bcc10525b..56b162cdefe 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash @@ -68,6 +68,7 @@ move_elasticsearch() { # Export some useful paths. export_elasticsearch_paths() { + export ESMODULES="$ESHOME/modules" export ESPLUGINS="$ESHOME/plugins" export ESCONFIG="$ESHOME/config" export ESSCRIPTS="$ESCONFIG/scripts" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json index 9fe9bfe3cad..c3dc0a18b45 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json @@ -44,13 +44,13 @@ "type" : "string", "description" : "The name of the tokenizer to use for the analysis" }, - "detail": { + "explain": { "type" : "boolean", "description" : "With `true`, outputs more advanced details. (default: false)" }, "attributes": { "type" : "list", - "description" : "A comma-separated list of token attributes to output, this parameter works only with `detail=true`" + "description" : "A comma-separated list of token attributes to output, this parameter works only with `explain=true`" }, "format": { "type": "enum", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json index 43be35a5a86..12f0d11c5fc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json @@ -12,7 +12,7 @@ }, "metric": { "type": "list", - "options": ["settings", "os", "process", "jvm", "thread_pool", "transport", "http", "plugins"], + "options": ["settings", "os", "process", "jvm", "thread_pool", "transport", "http", "plugins", "ingest"], "description": "A comma-separated list of metrics you wish returned. Leave empty to return all." } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json index 7e8683b3475..5cdeed1b142 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json @@ -31,6 +31,10 @@ "parent_task": { "type" : "number", "description" : "Return tasks with specified parent task id. Set to -1 to return all." 
+ }, + "wait_for_completion": { + "type": "boolean", + "description": "Wait for the matching tasks to complete (default: false)" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update-by-query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update-by-query.json index 9d5183ee4f3..dca49cbcc6a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update-by-query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update-by-query.json @@ -105,6 +105,10 @@ "options" : ["query_then_fetch", "dfs_query_then_fetch"], "description" : "Search operation type" }, + "search_timeout": { + "type" : "time", + "description" : "Explicit timeout for each search request. Defaults to no timeout." + }, "size": { "type" : "number", "description" : "Number of hits to return (default: 10)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml index 432b0e50ae4..effc4c20313 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.recovery/10_basic.yaml @@ -27,7 +27,7 @@ ( index1 \s+ \d \s+ # shard - \d+ms \s+ # time + (?:\d+ms|\d+(?:\.\d+)?s) \s+ # time in ms or seconds (store|replica|snapshot|relocating) \s+ # type (init|index|verify_index|translog|finalize|done) \s+ # stage [-\w./]+ \s+ # source_host @@ -35,14 +35,16 @@ [-\w./]+ \s+ # repository [-\w./]+ \s+ # snapshot \d+ \s+ # files + \d+ \s+ # files_recovered \d+\.\d+% \s+ # files_percent + \d+ \s+ # files_total \d+ \s+ # bytes + \d+ \s+ # bytes_recovered \d+\.\d+% \s+ # bytes_percent - \d+ \s+ # total_files - \d+ \s+ # total_bytes - \d+ \s+ # translog - -?\d+\.\d+% \s+ # translog_percent - -?\d+ # total_translog + \d+ \s+ # bytes_total + -?\d+ \s+ # translog_ops + \d+ \s+ # translog_ops_recovered + -?\d+\.\d+% # translog_ops_percent \n )+ $/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml index f264928c21b..dfafd833509 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml @@ -48,11 +48,7 @@ merges.total_docs .+ \n merges.total_size .+ \n merges.total_time .+ \n - percolate.current .+ \n - percolate.memory_size .+ \n percolate.queries .+ \n - percolate.time .+ \n - percolate.total .+ \n refresh.total .+ \n refresh.time .+ \n search.fetch_current .+ \n diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml index 8d59e7c139c..780edacd7b6 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yaml @@ -29,14 +29,14 @@ - do: cat.thread_pool: - h: id,ba,fa,gea,ga,ia,maa,ma,fma,pa + h: id,ba,fa,gea,ga,ia,maa,ma,fma v: true full_id: true - match: $body: | - /^ id \s+ ba \s+ fa \s+ gea \s+ ga \s+ ia \s+ maa \s+ fma \s+ pa \n - (\S+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ + /^ id \s+ ba \s+ fa \s+ gea \s+ ga \s+ ia \s+ maa \s+ fma \n + (\S+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ - do: cat.thread_pool: @@ -108,16 +108,6 @@ /^ id \s+ force_merge.type \s+ force_merge.active 
\s+ force_merge.size \s+ force_merge.queue \s+ force_merge.queueSize \s+ force_merge.rejected \s+ force_merge.largest \s+ force_merge.completed \s+ force_merge.min \s+ force_merge.max \s+ force_merge.keepAlive \n (\S+ \s+ (cached|fixed|scaling)? \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - - do: - cat.thread_pool: - h: id,percolate.type,percolate.active,percolate.size,percolate.queue,percolate.queueSize,percolate.rejected,percolate.largest,percolate.completed,percolate.min,percolate.max,percolate.keepAlive - v: true - - - match: - $body: | - /^ id \s+ percolate.type \s+ percolate.active \s+ percolate.size \s+ percolate.queue \s+ percolate.queueSize \s+ percolate.rejected \s+ percolate.largest \s+ percolate.completed \s+ percolate.min \s+ percolate.max \s+ percolate.keepAlive \n - (\S+ \s+ (cached|fixed|scaling)? \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - - do: cat.thread_pool: h: id,refresh.type,refresh.active,refresh.size,refresh.queue,refresh.queueSize,refresh.rejected,refresh.largest,refresh.completed,refresh.min,refresh.max,refresh.keepAlive diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yaml index 88160ef4f1e..93ffe0d5db1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yaml @@ -75,7 +75,7 @@ setup: "Detail response with Analyzer": - do: indices.analyze: - body: {"text": "This is troubled", "analyzer": standard, "explain": true} + body: {"text": "This is troubled", "analyzer": standard, "explain": "true"} - length: { detail.analyzer.tokens: 3 } - match: { detail.analyzer.name: standard } - match: { detail.analyzer.tokens.0.token: this } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml index ed676924f99..19598c7363e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml @@ -31,7 +31,6 @@ setup: - is_true: _all.total.warmer - is_true: _all.total.query_cache - is_true: _all.total.fielddata - - is_true: _all.total.percolate - is_true: _all.total.completion - is_true: _all.total.segments - is_true: _all.total.translog @@ -54,7 +53,6 @@ setup: - is_true: _all.total.warmer - is_true: _all.total.query_cache - is_true: _all.total.fielddata - - is_true: _all.total.percolate - is_true: _all.total.completion - is_true: _all.total.segments - is_true: _all.total.translog @@ -77,7 +75,6 @@ setup: - is_false: _all.total.warmer - is_false: _all.total.query_cache - is_false: _all.total.fielddata - - is_false: _all.total.percolate - is_false: _all.total.completion - is_false: _all.total.segments - is_false: _all.total.translog @@ -100,7 +97,6 @@ setup: - is_false: _all.total.warmer - is_false: _all.total.query_cache - is_false: _all.total.fielddata - - is_false: _all.total.percolate - is_false: _all.total.completion - is_false: _all.total.segments - is_false: _all.total.translog @@ -124,7 +120,6 @@ setup: - is_false: _all.total.warmer - is_false: _all.total.query_cache - is_false: _all.total.fielddata - - is_false: _all.total.percolate - is_false: _all.total.completion - is_false: _all.total.segments - 
is_false: _all.total.translog diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/70_bulk.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/70_bulk.yaml index b363f018667..7a3515b2ed2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/70_bulk.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/70_bulk.yaml @@ -68,6 +68,24 @@ setup: - is_false: _source.field1 - is_false: _source.field2 + - do: + cluster.state: {} + # Get master node id + - set: { master_node: master } + + - do: + nodes.stats: + metric: [ ingest ] + #we can't assert anything here since we might have more than one node in the cluster + - gte: {nodes.$master.ingest.total.count: 0} + - gte: {nodes.$master.ingest.total.failed: 0} + - gte: {nodes.$master.ingest.total.time_in_millis: 0} + - match: {nodes.$master.ingest.total.current: 0} + - gte: {nodes.$master.ingest.pipelines.pipeline1.count: 0} + - match: {nodes.$master.ingest.pipelines.pipeline1.failed: 0} + - gte: {nodes.$master.ingest.pipelines.pipeline1.time_in_millis: 0} + - match: {nodes.$master.ingest.pipelines.pipeline1.current: 0} + --- "Test bulk request with default pipeline": @@ -88,6 +106,24 @@ setup: - f1: v2 - gte: { ingest_took: 0 } + - do: + cluster.state: {} + # Get master node id + - set: { master_node: master } + + - do: + nodes.stats: + metric: [ ingest ] + #we can't assert anything here since we might have more than one node in the cluster + - gte: {nodes.$master.ingest.total.count: 0} + - gte: {nodes.$master.ingest.total.failed: 0} + - gte: {nodes.$master.ingest.total.time_in_millis: 0} + - match: {nodes.$master.ingest.total.current: 0} + - gte: {nodes.$master.ingest.pipelines.pipeline2.count: 0} + - match: {nodes.$master.ingest.pipelines.pipeline2.failed: 0} + - gte: {nodes.$master.ingest.pipelines.pipeline2.time_in_millis: 0} + - match: {nodes.$master.ingest.pipelines.pipeline2.current: 0} + - do: get: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/percolate/16_existing_doc.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/percolate/16_existing_doc.yaml index 2430fe63a02..c6f12131f35 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/percolate/16_existing_doc.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/percolate/16_existing_doc.yaml @@ -108,5 +108,5 @@ term: tag: tag1 - - match: {'matches': [{_index: percolator_index, _id: test_percolator}]} + - match: {'matches': [{_index: percolator_index, _id: test_percolator, _score: 1.0}]} diff --git a/settings.gradle b/settings.gradle index f2518e69b12..d03cac653ee 100644 --- a/settings.gradle +++ b/settings.gradle @@ -11,6 +11,7 @@ List projects = [ 'test:framework', 'test:fixtures:example-fixture', 'test:fixtures:hdfs-fixture', + 'test:logger-usage', 'modules:ingest-grok', 'modules:lang-expression', 'modules:lang-groovy', @@ -38,6 +39,7 @@ List projects = [ 'plugins:repository-s3', 'plugins:jvm-example', 'plugins:store-smb', + 'qa:backwards-5.0', 'qa:evil-tests', 'qa:smoke-test-client', 'qa:smoke-test-multinode', diff --git a/test/build.gradle b/test/build.gradle index 564f8673307..7feb332b717 100644 --- a/test/build.gradle +++ b/test/build.gradle @@ -27,11 +27,11 @@ subprojects { apply plugin: 'elasticsearch.build' - // the main files are actually test files, so use the appopriate forbidden api sigs + // the main files are actually test files, so use the appropriate forbidden api sigs forbiddenApisMain { - bundledSignatures = ['jdk-unsafe', 'jdk-deprecated'] - 
signaturesURLs = [PrecommitTasks.getResource('/forbidden/all-signatures.txt'), - PrecommitTasks.getResource('/forbidden/test-signatures.txt')] + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'), + PrecommitTasks.getResource('/forbidden/es-signatures.txt'), + PrecommitTasks.getResource('/forbidden/es-test-signatures.txt')] } // TODO: should we have licenses for our test deps? diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 355459f99f5..af65c9ff7c9 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -36,9 +36,8 @@ dependencies { compileJava.options.compilerArgs << '-Xlint:-cast,-rawtypes,-try,-unchecked' compileTestJava.options.compilerArgs << '-Xlint:-rawtypes' -// the main files are actually test files, so use the appopriate forbidden api sigs +// the main files are actually test files, so use the appropriate forbidden api sigs forbiddenApisMain { - bundledSignatures = ['jdk-unsafe', 'jdk-deprecated'] signaturesURLs = [PrecommitTasks.getResource('/forbidden/all-signatures.txt'), PrecommitTasks.getResource('/forbidden/test-signatures.txt')] } diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index aa77c670a42..68eb0420b39 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -166,7 +166,7 @@ public class BootstrapForTesting { } /** - * we dont know which codesources belong to which plugin, so just remove the permission from key codebases + * we don't know which codesources belong to which plugin, so just remove the permission from key codebases * like core, test-framework, etc. this way tests fail if accesscontroller blocks are missing. */ @SuppressForbidden(reason = "accesses fully qualified URLs to configure security") diff --git a/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java b/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java new file mode 100644 index 00000000000..e9c6a2eec9c --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/cli/CommandTestCase.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +/** + * A base test case for cli tools. + */ +public abstract class CommandTestCase extends ESTestCase { + + /** The terminal that execute uses. */ + protected final MockTerminal terminal = new MockTerminal(); + + /** The last command that was executed. 
*/ + protected Command command; + + @Before + public void resetTerminal() { + terminal.reset(); + terminal.setVerbosity(Terminal.Verbosity.NORMAL); + } + + /** Creates a Command to test execution. */ + protected abstract Command newCommand(); + + /** + * Runs the command with the given args. + * + * Output can be found in {@link #terminal}. + * The command created can be found in {@link #command}. + */ + public String execute(String... args) throws Exception { + command = newCommand(); + command.mainWithoutErrorHandling(args, terminal); + return terminal.getOutput(); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/cli/MockTerminal.java b/test/framework/src/main/java/org/elasticsearch/cli/MockTerminal.java new file mode 100644 index 00000000000..bd8bd493cea --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/cli/MockTerminal.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +import java.io.ByteArrayOutputStream; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; +import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayDeque; +import java.util.Deque; + +/** + * A terminal for tests which captures all output, and + * can be plugged with fake input. + */ +public class MockTerminal extends Terminal { + + private final ByteArrayOutputStream buffer = new ByteArrayOutputStream(); + private final PrintWriter writer = new PrintWriter(new OutputStreamWriter(buffer, StandardCharsets.UTF_8)); + private final Deque<String> textInput = new ArrayDeque<>(); + private final Deque<String> secretInput = new ArrayDeque<>(); + + public MockTerminal() { + super("\n"); // always *nix newlines for tests + } + + @Override + public String readText(String prompt) { + if (textInput.isEmpty()) { + throw new IllegalStateException("No text input configured for prompt [" + prompt + "]"); + } + return textInput.removeFirst(); + } + + @Override + public char[] readSecret(String prompt) { + if (secretInput.isEmpty()) { + throw new IllegalStateException("No secret input configured for prompt [" + prompt + "]"); + } + return secretInput.removeFirst().toCharArray(); + } + + @Override + public PrintWriter getWriter() { + return writer; + } + + /** Adds an input that will be returned from {@link #readText(String)}. Values are read in FIFO order. */ + public void addTextInput(String input) { + textInput.addLast(input); + } + + /** Adds an input that will be returned from {@link #readSecret(String)}. Values are read in FIFO order. */ + public void addSecretInput(String input) { + secretInput.addLast(input); + } + + /** Returns all output written to this terminal.
*/ + public String getOutput() throws UnsupportedEncodingException { + return buffer.toString("UTF-8"); + } + + /** Wipes the input and output. */ + public void reset() { + buffer.reset(); + textInput.clear(); + secretInput.clear(); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index 3e9b0c09cb2..5b973f67abc 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; @@ -74,7 +75,7 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService { null, null, null, null, null, fsInfo, null, null, null, - null, null); + null, null, null); } @Inject diff --git a/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java deleted file mode 100644 index 6d6c176b27d..00000000000 --- a/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.cli; - -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.StreamsUtils; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasSize; - -public abstract class CliToolTestCase extends ESTestCase { - - @Before - @SuppressForbidden(reason = "sets es.default.path.home during tests") - public void setPathHome() { - System.setProperty("es.default.path.home", createTempDir().toString()); - } - - @After - @SuppressForbidden(reason = "clears es.default.path.home during tests") - public void clearPathHome() { - System.clearProperty("es.default.path.home"); - } - - public static String[] args(String command) { - if (!Strings.hasLength(command)) { - return Strings.EMPTY_ARRAY; - } - return command.split("\\s+"); - } - - /** - * A terminal implementation that discards everything - */ - public static class MockTerminal extends Terminal { - - @Override - protected void doPrint(String msg) {} - - @Override - public String readText(String prompt) { - return null; - } - - @Override - public char[] readSecret(String prompt) { - return new char[0]; - } - } - - /** - * A terminal implementation that captures everything written to it - */ - public static class CaptureOutputTerminal extends MockTerminal { - - List terminalOutput = new ArrayList<>(); - - public CaptureOutputTerminal() { - this(Verbosity.NORMAL); - } - - public CaptureOutputTerminal(Verbosity verbosity) { - setVerbosity(verbosity); - } - - @Override - protected void doPrint(String msg) { - terminalOutput.add(msg); - } - - public List getTerminalOutput() { - return terminalOutput; - } - } - - public static void assertTerminalOutputContainsHelpFile(CliToolTestCase.CaptureOutputTerminal terminal, String classPath) throws IOException { - List nonEmptyLines = new ArrayList<>(); - for (String line : terminal.getTerminalOutput()) { - String originalPrintedLine = line.replaceAll(System.lineSeparator(), ""); - if (Strings.isNullOrEmpty(originalPrintedLine)) { - nonEmptyLines.add(originalPrintedLine); - } - } - assertThat(nonEmptyLines, hasSize(greaterThan(0))); - - String expectedDocs = StreamsUtils.copyToStringFromClasspath(classPath); - for (String nonEmptyLine : nonEmptyLines) { - assertThat(expectedDocs, containsString(nonEmptyLine.replaceAll(System.lineSeparator(), ""))); - } - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldMapper.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldMapper.java new file mode 100644 index 00000000000..8a28a16220c --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldMapper.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import java.io.IOException; +import java.util.List; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; + +// this sucks how much must be overridden just do get a dummy field mapper... +public class MockFieldMapper extends FieldMapper { + static Settings dummySettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id).build(); + + public MockFieldMapper(String fullName) { + this(fullName, new FakeFieldType()); + } + + public MockFieldMapper(String fullName, MappedFieldType fieldType) { + super(findSimpleName(fullName), setName(fullName, fieldType), setName(fullName, fieldType), dummySettings, + MultiFields.empty(), new CopyTo.Builder().build()); + } + + static MappedFieldType setName(String fullName, MappedFieldType fieldType) { + fieldType.setName(fullName); + return fieldType; + } + + static String findSimpleName(String fullName) { + int ndx = fullName.lastIndexOf('.'); + return fullName.substring(ndx + 1); + } + + static class FakeFieldType extends MappedFieldType { + public FakeFieldType() { + } + + protected FakeFieldType(FakeFieldType ref) { + super(ref); + } + + @Override + public MappedFieldType clone() { + return new FakeFieldType(this); + } + + @Override + public String typeName() { + return "faketype"; + } + } + + @Override + protected String contentType() { + return null; + } + + @Override + protected void parseCreateField(ParseContext context, List list) throws IOException { + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java index 7496bfb8263..4022366c1d0 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java +++ b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java @@ -20,7 +20,7 @@ package org.elasticsearch.search; import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -33,6 +33,7 @@ import org.elasticsearch.search.dfs.DfsPhase; import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.query.QueryPhase; +import org.elasticsearch.search.suggest.Suggesters; import org.elasticsearch.threadpool.ThreadPool; import java.util.HashMap; @@ -69,9 +70,9 @@ public class MockSearchService extends SearchService { public MockSearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase 
fetchPhase, - AggregatorParsers aggParsers) { + AggregatorParsers aggParsers, Suggesters suggesters) { super(settings, clusterSettings, clusterService, indicesService, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase, - queryPhase, fetchPhase, aggParsers); + queryPhase, fetchPhase, aggParsers, suggesters); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java index 15b72c4cccd..916adc142c8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java @@ -88,11 +88,11 @@ public final class CorruptionUtils { // we need to add assumptions here that the checksums actually really don't match there is a small chance to get collisions // in the checksum which is ok though.... StringBuilder msg = new StringBuilder(); - msg.append("Checksum before: [").append(checksumBeforeCorruption).append("]"); - msg.append(" after: [").append(checksumAfterCorruption).append("]"); - msg.append(" checksum value after corruption: ").append(actualChecksumAfterCorruption).append("]"); - msg.append(" file: ").append(fileToCorrupt.getFileName()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString())); - logger.info(msg.toString()); + msg.append("before: [").append(checksumBeforeCorruption).append("] "); + msg.append("after: [").append(checksumAfterCorruption).append("] "); + msg.append("checksum value after corruption: ").append(actualChecksumAfterCorruption).append("] "); + msg.append("file: ").append(fileToCorrupt.getFileName()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString())); + logger.info("Checksum {}", msg); assumeTrue("Checksum collision - " + msg.toString(), checksumAfterCorruption != checksumBeforeCorruption // collision || actualChecksumAfterCorruption != checksumBeforeCorruption); // checksum corrupted diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java index f653819c140..1a38e32cf1a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java @@ -33,7 +33,8 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation; -import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; @@ -79,19 +80,19 @@ public abstract class ESAllocationTestCase extends ESTestCase { public static MockAllocationService createAllocationService(Settings settings, ClusterSettings clusterSettings, Random random) { return new MockAllocationService(settings, randomAllocationDeciders(settings, clusterSettings, random), - new ShardsAllocators(settings, 
NoopGatewayAllocator.INSTANCE), EmptyClusterInfoService.INSTANCE); + NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE); } public static MockAllocationService createAllocationService(Settings settings, ClusterInfoService clusterInfoService) { return new MockAllocationService(settings, randomAllocationDeciders(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), getRandom()), - new ShardsAllocators(settings, NoopGatewayAllocator.INSTANCE), clusterInfoService); + NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(settings), clusterInfoService); } - public static MockAllocationService createAllocationService(Settings settings, GatewayAllocator allocator) { + public static MockAllocationService createAllocationService(Settings settings, GatewayAllocator gatewayAllocator) { return new MockAllocationService(settings, randomAllocationDeciders(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), getRandom()), - new ShardsAllocators(settings, allocator), EmptyClusterInfoService.INSTANCE); + gatewayAllocator, new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE); } @@ -193,8 +194,9 @@ public abstract class ESAllocationTestCase extends ESTestCase { private Long nanoTimeOverride = null; - public MockAllocationService(Settings settings, AllocationDeciders allocationDeciders, ShardsAllocators shardsAllocators, ClusterInfoService clusterInfoService) { - super(settings, allocationDeciders, shardsAllocators, clusterInfoService); + public MockAllocationService(Settings settings, AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator, + ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) { + super(settings, allocationDeciders, gatewayAllocator, shardsAllocator, clusterInfoService); } public void setNanoTimeOverride(long nanoTime) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index a3161f4090f..24a7360d921 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -41,6 +41,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; @@ -56,7 +57,6 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -68,6 +68,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import 
org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; @@ -77,6 +78,7 @@ import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.transport.InetSocketTransportAddress; @@ -95,20 +97,22 @@ import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexWarmer; +import org.elasticsearch.index.MergePolicyConfig; +import org.elasticsearch.index.MergeSchedulerConfig; +import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappedFieldType.Loading; import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; -import org.elasticsearch.index.MergePolicyConfig; -import org.elasticsearch.index.MergeSchedulerConfig; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.index.IndexWarmer; import org.elasticsearch.indices.IndicesRequestCache; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.node.NodeMocksPlugin; import org.elasticsearch.plugins.Plugin; @@ -196,7 +200,7 @@ import static org.hamcrest.Matchers.startsWith; * should be used, here is an example: *
        *
      - * {@literal @}ClusterScope(scope=Scope.TEST) public class SomeIT extends ESIntegTestCase {
+ * {@literal @}ClusterScope(scope=Scope.TEST) public class SomeIT extends ESIntegTestCase {
        * public void testMethod() {}
        * }
  * </pre>
@@ -207,7 +211,7 @@ import static org.hamcrest.Matchers.startsWith;
  * determined at random and can change across tests. The {@link ClusterScope} allows configuring the initial number of nodes
  * that are created before the tests start.
  * <pre>
      - * {@literal @}ClusterScope(scope=Scope.SUITE, numDataNodes=3)
+ * {@literal @}ClusterScope(scope=Scope.SUITE, numDataNodes=3)
        * public class SomeIT extends ESIntegTestCase {
        * public void testMethod() {}
        * }
      @@ -268,7 +272,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
            * The value of this seed can be used to initialize a random context for a specific index.
            * It's set once per test via a generic index template.
            */
-    public static final Setting<Long> INDEX_TEST_SEED_SETTING = Setting.longSetting("index.tests.seed", 0, Long.MIN_VALUE, false, Setting.Scope.INDEX);
+    public static final Setting<Long> INDEX_TEST_SEED_SETTING =
+        Setting.longSetting("index.tests.seed", 0, Long.MIN_VALUE, Property.IndexScope);
       
           /**
            * A boolean value to enable or disable mock modules. This is useful to test the
      @@ -384,6 +389,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
                   } else {
                       randomSettingsBuilder.put("index.codec", CodecService.LUCENE_DEFAULT_CODEC);
                   }
      +
                   XContentBuilder mappings = null;
                   if (frequently() && randomDynamicTemplates()) {
                       mappings = XContentFactory.jsonBuilder().startObject().startObject("_default_");
      @@ -450,7 +456,15 @@ public abstract class ESIntegTestCase extends ESTestCase {
                   for (String setting : randomSettingsBuilder.internalMap().keySet()) {
                       assertThat("non index. prefix setting set on index template, its a node setting...", setting, startsWith("index."));
                   }
+            // always default delayed allocation to 0 to make sure tests are not delayed
      +            randomSettingsBuilder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0);
      +            if (randomBoolean()) {
      +                randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), randomBoolean() ? IndexModule.INDEX_QUERY_CACHE : IndexModule.NONE_QUERY_CACHE);
      +            }
       
      +            if (randomBoolean()) {
      +                randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), randomBoolean());
      +            }
                   PutIndexTemplateRequestBuilder putTemplate = client().admin().indices()
                           .preparePutTemplate("random_index_template")
                           .setTemplate("*")
      @@ -736,6 +750,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
                   logger.info("using custom data_path for index: [{}]", dataPath);
                   builder.put(IndexMetaData.SETTING_DATA_PATH, dataPath);
               }
+        // always default delayed allocation to 0 to make sure tests are not delayed
      +        builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0);
               return builder.build();
           }
       
      @@ -836,7 +852,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
               assertThat(nodes, Matchers.not(Matchers.emptyIterable()));
               for (String node : nodes) {
                   IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
      -            IndexService indexService = indicesService.indexService(index);
      +            IndexService indexService = indicesService.indexService(resolveIndex(index));
                   assertThat("index service doesn't exists on " + node, indexService, notNullValue());
                   DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
                   assertThat("document mapper doesn't exists on " + node, documentMapper, notNullValue());
      @@ -881,7 +897,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
                       sb.append("\n-> _index: [").append(hit.getIndex()).append("] type [").append(hit.getType())
                               .append("] id [").append(hit.id()).append("]");
                   }
      -            logger.warn(sb.toString());
      +            logger.warn("{}", sb);
                   fail(failMsg);
               }
           }
      @@ -1096,7 +1112,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
                   // remove local node reference
                   masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null);
            Map<String, Object> masterStateMap = convertToMap(masterClusterState);
      -            int masterClusterStateSize = masterClusterState.toString().length();
      +            int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length;
                   String masterId = masterClusterState.nodes().masterNodeId();
                   for (Client client : cluster().getClients()) {
                       ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState();
      @@ -1104,7 +1120,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
                       // remove local node reference
                       localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null);
                final Map<String, Object> localStateMap = convertToMap(localClusterState);
      -                final int localClusterStateSize = localClusterState.toString().length();
      +                final int localClusterStateSize = ClusterState.Builder.toBytes(localClusterState).length;
                       // Check that the non-master node has the same version of the cluster state as the master and
                       // that the master node matches the master (otherwise there is no requirement for the cluster state to match)
                       if (masterClusterState.version() == localClusterState.version() && masterId.equals(localClusterState.nodes().masterNodeId())) {
      @@ -1696,7 +1712,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
       
           /** Helper method to create list of plugins without specifying generic types. */
           @SafeVarargs
      -    @SuppressWarnings("varargs") // due to type erasure, the varargs type is non-reifiable, which casues this warning
      +    @SuppressWarnings("varargs") // due to type erasure, the varargs type is non-reifiable, which causes this warning
     protected final Collection<Class<? extends Plugin>> pluginList(Class<? extends Plugin>... plugins) {
               return Arrays.asList(plugins);
           }
      @@ -1704,7 +1720,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
           /**
            * This method is used to obtain additional settings for clients created by the internal cluster.
            * These settings will be applied on the client in addition to some randomized settings defined in
      -     * the cluster. These setttings will also override any other settings the internal cluster might
      +     * the cluster. These settings will also override any other settings the internal cluster might
            * add by default.
            */
           protected Settings transportClientSettings() {
      @@ -1840,7 +1856,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
               }
               @Override
               public String description() {
      -            return "a test plugin that registeres index.tests.seed as an index setting";
      +            return "a test plugin that registers index.tests.seed as an index setting";
               }
               public void onModule(SettingsModule module) {
                   module.registerSetting(INDEX_TEST_SEED_SETTING);
      @@ -1981,7 +1997,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
           @After
           public final void after() throws Exception {
               printTestMessage("finished");
      -        // Deleting indices is going to clear search contexts implicitely so we
      +        // Deleting indices is going to clear search contexts implicitly so we
               // need to check that there are no more in-flight search contexts before
               // we remove indices
               super.ensureAllSearchContextsReleased();
      @@ -2041,7 +2057,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
            * of the provided index.
            */
           protected String routingKeyForShard(String index, String type, int shard) {
      -        return internalCluster().routingKeyForShard(index, type, shard, getRandom());
      +        return internalCluster().routingKeyForShard(resolveIndex(index), type, shard, getRandom());
           }
       
           /**
      @@ -2144,4 +2160,11 @@ public abstract class ESIntegTestCase extends ESTestCase {
           public @interface SuppressNetworkMode {
           }
       
      +    public static Index resolveIndex(String index) {
      +        GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().setIndices(index).get();
      +        assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index));
      +        String uuid = getIndexResponse.getSettings().get(index).get(IndexMetaData.SETTING_INDEX_UUID);
      +        return new Index(index, uuid);
      +    }
      +
       }
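The resolveIndex helper added above pairs an index name with the UUID it was assigned at creation, which the per-node service lookups in this patch now require. A minimal sketch of a test using it; the index name "test" and the assertions are illustrative, not part of the patch:

    public void testResolveIndexSketch() {
        createIndex("test");                 // hypothetical index created just for this example
        ensureGreen("test");
        Index index = resolveIndex("test");  // the name plus its creation-time UUID
        IndicesService indicesService = internalCluster().getInstance(IndicesService.class);
        assertNotNull(indicesService.indexService(index)); // lookups are now by Index, not by name
    }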
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
      index fc713400262..8eeb96a94bf 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
      @@ -22,6 +22,8 @@ import org.apache.lucene.util.IOUtils;
       import org.elasticsearch.Version;
       import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
       import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
      +import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
      +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder;
       import org.elasticsearch.cache.recycler.PageCacheRecycler;
       import org.elasticsearch.client.Client;
       import org.elasticsearch.client.Requests;
      @@ -31,13 +33,13 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
       import org.elasticsearch.cluster.metadata.MetaData;
       import org.elasticsearch.cluster.node.DiscoveryNode;
       import org.elasticsearch.common.Priority;
      -import org.elasticsearch.common.lease.Releasables;
       import org.elasticsearch.common.settings.Settings;
       import org.elasticsearch.common.unit.TimeValue;
       import org.elasticsearch.common.util.BigArrays;
       import org.elasticsearch.common.util.concurrent.EsExecutors;
       import org.elasticsearch.common.xcontent.XContentBuilder;
       import org.elasticsearch.env.Environment;
      +import org.elasticsearch.index.Index;
       import org.elasticsearch.index.IndexService;
       import org.elasticsearch.indices.IndicesService;
       import org.elasticsearch.node.MockNode;
      @@ -83,6 +85,12 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
               // SERVICE_UNAVAILABLE/1/state not recovered / initialized block
               ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForGreenStatus().get();
               assertFalse(clusterHealthResponse.isTimedOut());
      +        client().admin().indices()
      +            .preparePutTemplate("random_index_template")
      +            .setTemplate("*")
      +            .setOrder(0)
      +            .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
      +            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).get();
           }
       
           private static void stopNode() throws IOException {
      @@ -108,7 +116,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
           public void setUp() throws Exception {
               super.setUp();
               // Create the node lazily, on the first test. This is ok because we do not randomize any settings,
      -        // only the cluster name. This allows us to have overriden properties for plugins and the version to use.
      +        // only the cluster name. This allows us to have overridden properties for plugins and the version to use.
               if (NODE == null) {
                   startNode();
               }
      @@ -153,11 +161,16 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
       
           /** Helper method to create list of plugins without specifying generic types. */
           @SafeVarargs
      -    @SuppressWarnings("varargs") // due to type erasure, the varargs type is non-reifiable, which casues this warning
      +    @SuppressWarnings("varargs") // due to type erasure, the varargs type is non-reifiable, which causes this warning
     protected final Collection<Class<? extends Plugin>> pluginList(Class<? extends Plugin>... plugins) {
               return Arrays.asList(plugins);
           }
       
      +    /** Additional settings to add when creating the node. Also allows overriding the default settings. */
      +    protected Settings nodeSettings() {
      +        return Settings.EMPTY;
      +    }
      +
           private Node newNode() {
               Settings settings = Settings.builder()
                   .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong()))
      @@ -166,8 +179,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
                   // This needs to tie into the ESIntegTestCase#indexSettings() method
                   .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), createTempDir().getParent())
                   .put("node.name", nodeName())
      -            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
      -            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
      +
                   .put("script.inline", "true")
                   .put("script.indexed", "true")
                   .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created
      @@ -175,6 +187,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
                   .put(Node.NODE_LOCAL_SETTING.getKey(), true)
                   .put(Node.NODE_DATA_SETTING.getKey(), true)
                   .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true) // make sure we get what we set :)
      +            .put(nodeSettings()) // allow test cases to provide their own settings or override these
                   .build();
               Node build = new MockNode(settings, getVersion(), getPlugins());
               build.start();
      @@ -255,7 +268,14 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
               assertThat(health.getStatus(), lessThanOrEqualTo(ClusterHealthStatus.YELLOW));
               assertThat("Cluster must be a single node cluster", health.getNumberOfDataNodes(), equalTo(1));
               IndicesService instanceFromNode = getInstanceFromNode(IndicesService.class);
      -        return instanceFromNode.indexServiceSafe(index);
      +        return instanceFromNode.indexServiceSafe(resolveIndex(index));
      +    }
      +
      +    public Index resolveIndex(String index) {
      +        GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().setIndices(index).get();
      +        assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index));
      +        String uuid = getIndexResponse.getSettings().get(index).get(IndexMetaData.SETTING_INDEX_UUID);
      +        return new Index(index, uuid);
           }
       
           /**
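The new nodeSettings() hook shown above is applied last in newNode()'s settings builder, so a subclass can both add settings and override the defaults. A hedged sketch of a subclass using it; the class name and the overridden key are illustrative:

    public class MyScriptedSingleNodeTests extends ESSingleNodeTestCase {
        @Override
        protected Settings nodeSettings() {
            // flips a default that newNode() put into the builder earlier in the chain
            return Settings.builder().put("script.inline", "false").build();
        }
    }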
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
      index 4a20d3c3fd6..23e58b0ed17 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
      @@ -29,7 +29,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomInts;
       import com.carrotsearch.randomizedtesting.generators.RandomPicks;
       import com.carrotsearch.randomizedtesting.generators.RandomStrings;
       import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter;
      -import junit.framework.AssertionFailedError;
       import org.apache.lucene.uninverting.UninvertingReader;
       import org.apache.lucene.util.LuceneTestCase;
       import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
      @@ -46,10 +45,14 @@ import org.elasticsearch.common.io.PathUtilsForTesting;
       import org.elasticsearch.common.logging.ESLogger;
       import org.elasticsearch.common.logging.Loggers;
       import org.elasticsearch.common.settings.Settings;
      +import org.elasticsearch.common.settings.SettingsModule;
       import org.elasticsearch.common.util.MockBigArrays;
       import org.elasticsearch.common.xcontent.XContentType;
       import org.elasticsearch.env.Environment;
       import org.elasticsearch.env.NodeEnvironment;
      +import org.elasticsearch.index.Index;
      +import org.elasticsearch.index.analysis.AnalysisService;
      +import org.elasticsearch.indices.analysis.AnalysisModule;
       import org.elasticsearch.search.MockSearchService;
       import org.elasticsearch.test.junit.listeners.LoggingListener;
       import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
      @@ -67,6 +70,7 @@ import java.nio.file.Files;
       import java.nio.file.Path;
       import java.util.ArrayList;
       import java.util.Arrays;
      +import java.util.Collection;
       import java.util.Collections;
       import java.util.List;
       import java.util.Random;
      @@ -75,7 +79,10 @@ import java.util.concurrent.ExecutorService;
       import java.util.concurrent.Executors;
       import java.util.concurrent.TimeUnit;
       import java.util.function.BooleanSupplier;
      +import java.util.function.Consumer;
      +import java.util.function.Supplier;
       
      +import static org.elasticsearch.common.settings.Settings.settingsBuilder;
       import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList;
       import static org.hamcrest.Matchers.equalTo;
       
      @@ -130,7 +137,7 @@ public abstract class ESTestCase extends LuceneTestCase {
     protected void afterIfFailed(List<Throwable> errors) {
           }
       
      -    /** called after a test is finished, but only if succesfull */
      +    /** called after a test is finished, but only if successful */
           protected void afterIfSuccessful() throws Exception {
           }
       
      @@ -282,10 +289,10 @@ public abstract class ESTestCase extends LuceneTestCase {
            * Returns a double value in the interval [start, end) if lowerInclusive is
            * set to true, (start, end) otherwise.
            *
      -     * @param start lower bound of interval to draw uniformly distributed random numbers from
      -     * @param end upper bound
      +     * @param start          lower bound of interval to draw uniformly distributed random numbers from
      +     * @param end            upper bound
            * @param lowerInclusive whether or not to include lower end of the interval
      -     * */
      +     */
           public static double randomDoubleBetween(double start, double end, boolean lowerInclusive) {
               double result = 0.0;
       
      @@ -396,6 +403,26 @@ public abstract class ESTestCase extends LuceneTestCase {
               return randomTimeValue(1, 1000);
           }
       
      +    /**
+     * helper to randomly perform on <code>consumer</code> with <code>value</code>
      +     */
+    public static <T> void maybeSet(Consumer<T> consumer, T value) {
      +        if (randomBoolean()) {
      +            consumer.accept(value);
      +        }
      +    }
      +
      +    /**
      +     * helper to get a random value in a certain range that's different from the input
      +     */
+    public static <T> T randomValueOtherThan(T input, Supplier<T> randomSupplier) {
      +        T randomValue = null;
      +        do {
      +            randomValue = randomSupplier.get();
      +        } while (randomValue.equals(input));
      +        return randomValue;
      +    }
      +
           /**
            * Runs the code block for 10 seconds waiting for no assertion to trip.
            */
      @@ -555,12 +582,27 @@ public abstract class ESTestCase extends LuceneTestCase {
            * Returns size random values
            */
     public static <T> List<T> randomSubsetOf(int size, T... values) {
      -        if (size > values.length) {
      -            throw new IllegalArgumentException("Can\'t pick " + size + " random objects from a list of " + values.length + " objects");
      -        }
         List<T> list = arrayAsArrayList(values);
      -        Collections.shuffle(list, random());
      -        return list.subList(0, size);
      +        return randomSubsetOf(size, list);
      +    }
      +
      +    /**
      +     * Returns a random subset of values (including a potential empty list)
      +     */
+    public static <T> List<T> randomSubsetOf(Collection<T> collection) {
      +        return randomSubsetOf(randomInt(collection.size() - 1), collection);
      +    }
      +
      +    /**
      +     * Returns size random values
      +     */
+    public static <T> List<T> randomSubsetOf(int size, Collection<T> collection) {
      +        if (size > collection.size()) {
      +            throw new IllegalArgumentException("Can\'t pick " + size + " random objects from a collection of " + collection.size() + " objects");
      +        }
+        List<T> tempList = new ArrayList<>(collection);
      +        Collections.shuffle(tempList, random());
      +        return tempList.subList(0, size);
           }
       
           /**
      @@ -631,27 +673,6 @@ public abstract class ESTestCase extends LuceneTestCase {
               assertEquals(expected.isNativeMethod(), actual.isNativeMethod());
           }
       
      -    /** A runnable that can throw any checked exception. */
      -    @FunctionalInterface
      -    public interface ThrowingRunnable {
      -        void run() throws Throwable;
      -    }
      -
      -    /** Checks a specific exception class is thrown by the given runnable, and returns it. */
-    public static <T extends Throwable> T expectThrows(Class<T> expectedType, ThrowingRunnable runnable) {
      -        try {
      -            runnable.run();
      -        } catch (Throwable e) {
      -            if (expectedType.isInstance(e)) {
      -                return expectedType.cast(e);
      -            }
      -            AssertionFailedError assertion = new AssertionFailedError("Unexpected exception type, expected " + expectedType.getSimpleName());
      -            assertion.initCause(e);
      -            throw assertion;
      -        }
      -        throw new AssertionFailedError("Expected exception " + expectedType.getSimpleName());
      -    }
      -
           protected static long spinForAtLeastOneMillisecond() {
               long nanosecondsInMillisecond = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS);
               // force at least one millisecond to elapse, but ensure the
      @@ -663,4 +684,24 @@ public abstract class ESTestCase extends LuceneTestCase {
               }
               return elapsed;
           }
      +
      +    /**
      +     * Creates an AnalysisService to test analysis factories and analyzers.
      +     */
      +    @SafeVarargs
+    public static AnalysisService createAnalysisService(Index index, Settings settings, Consumer<AnalysisModule>... moduleConsumers) throws IOException {
      +        Settings indexSettings = settingsBuilder().put(settings)
      +            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
      +            .build();
      +        Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build();
      +        Environment env = new Environment(nodeSettings);
      +        AnalysisModule analysisModule = new AnalysisModule(env);
+        for (Consumer<AnalysisModule> consumer : moduleConsumers) {
      +            consumer.accept(analysisModule);
      +        }
      +        SettingsModule settingsModule = new SettingsModule(nodeSettings);
      +        settingsModule.registerSetting(InternalSettingsPlugin.VERSION_CREATED);
      +        final AnalysisService analysisService = analysisModule.buildRegistry().build(IndexSettingsModule.newIndexSettings(index, indexSettings));
      +        return analysisService;
      +    }
       }
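The helpers added to ESTestCase above compose well in randomized tests. A small sketch of intended usage; the field list and the builder consumer are illustrative, not from the patch:

    List<String> fields = Arrays.asList("title", "body", "tags");
    List<String> some = randomSubsetOf(fields);       // possibly empty subset of the collection
    String field = randomFrom(fields);
    // retries the supplier until it produces something other than `field`
    String other = randomValueOtherThan(field, () -> randomFrom(fields));
    maybeSet(builder::setField, other);               // hypothetical consumer, applied only half the time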
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java b/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java
      index 64719f0f9de..f76ae7b4b56 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalSettingsPlugin.java
      @@ -20,6 +20,7 @@ package org.elasticsearch.test;
       
       import org.elasticsearch.cluster.metadata.IndexMetaData;
       import org.elasticsearch.common.settings.Setting;
      +import org.elasticsearch.common.settings.Setting.Property;
       import org.elasticsearch.common.settings.SettingsModule;
       import org.elasticsearch.plugins.Plugin;
       
      @@ -31,12 +32,15 @@ public final class InternalSettingsPlugin extends Plugin {
       
           @Override
           public String description() {
      -        return "a plugin that allows to set values for internal settings which are can't be set via the ordinary API without this pluging installed";
      +        return "a plugin that allows to set values for internal settings which are can't be set via the ordinary API without this plugin installed";
           }
       
-    public static final Setting<Integer> VERSION_CREATED = Setting.intSetting("index.version.created", 0, false, Setting.Scope.INDEX);
-    public static final Setting<Boolean> MERGE_ENABLED = Setting.boolSetting("index.merge.enabled", true, false, Setting.Scope.INDEX);
-    public static final Setting<Long> INDEX_CREATION_DATE_SETTING = Setting.longSetting(IndexMetaData.SETTING_CREATION_DATE, -1, -1, false, Setting.Scope.INDEX);
+    public static final Setting<Integer> VERSION_CREATED =
+        Setting.intSetting("index.version.created", 0, Property.IndexScope, Property.NodeScope);
+    public static final Setting<Boolean> MERGE_ENABLED =
+        Setting.boolSetting("index.merge.enabled", true, Property.IndexScope, Property.NodeScope);
+    public static final Setting<Long> INDEX_CREATION_DATE_SETTING =
+        Setting.longSetting(IndexMetaData.SETTING_CREATION_DATE, -1, -1, Property.IndexScope, Property.NodeScope);
       
           public void onModule(SettingsModule module) {
               module.registerSetting(VERSION_CREATED);
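The pattern above recurs throughout this patch: the old (key, default, dynamic flag, Setting.Scope) declarations become Property-based ones, and plugins still expose them through onModule. A minimal sketch of declaring and registering a setting under the new API; the key and field name are illustrative:

    public static final Setting<Boolean> MY_TEST_FLAG =
        Setting.boolSetting("index.my_test_flag", false, Property.IndexScope);

    public void onModule(SettingsModule module) {
        module.registerSetting(MY_TEST_FLAG);
    }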
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
      index d90ff1b8d17..dd49cfa4f04 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
      @@ -34,18 +34,17 @@ import org.elasticsearch.cache.recycler.PageCacheRecycler;
       import org.elasticsearch.client.Client;
       import org.elasticsearch.client.transport.TransportClient;
       import org.elasticsearch.cluster.ClusterName;
      -import org.elasticsearch.cluster.ClusterService;
       import org.elasticsearch.cluster.ClusterState;
       import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
       import org.elasticsearch.cluster.metadata.IndexMetaData;
       import org.elasticsearch.cluster.node.DiscoveryNode;
      +import org.elasticsearch.cluster.node.DiscoveryNodeService;
       import org.elasticsearch.cluster.node.DiscoveryNodes;
       import org.elasticsearch.cluster.routing.OperationRouting;
       import org.elasticsearch.cluster.routing.ShardRouting;
      -import org.elasticsearch.cluster.routing.UnassignedInfo;
       import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
       import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
      -import org.elasticsearch.cluster.service.InternalClusterService;
      +import org.elasticsearch.cluster.service.ClusterService;
       import org.elasticsearch.common.Nullable;
       import org.elasticsearch.common.Strings;
       import org.elasticsearch.common.breaker.CircuitBreaker;
      @@ -66,7 +65,7 @@ import org.elasticsearch.discovery.DiscoverySettings;
       import org.elasticsearch.env.Environment;
       import org.elasticsearch.env.NodeEnvironment;
       import org.elasticsearch.http.HttpServerTransport;
      -import org.elasticsearch.index.IndexModule;
      +import org.elasticsearch.index.Index;
       import org.elasticsearch.index.IndexService;
       import org.elasticsearch.index.engine.CommitStats;
       import org.elasticsearch.index.engine.Engine;
      @@ -418,14 +417,6 @@ public final class InternalTestCluster extends TestCluster {
                   builder.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop");
               }
       
      -        if (random.nextBoolean()) {
      -            builder.put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), random.nextBoolean() ? IndexModule.INDEX_QUERY_CACHE : IndexModule.NONE_QUERY_CACHE);
      -        }
      -
      -        if (random.nextBoolean()) {
      -            builder.put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), random.nextBoolean());
      -        }
      -
               if (random.nextBoolean()) {
                   if (random.nextInt(10) == 0) { // do something crazy slow here
                       builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB));
      @@ -456,9 +447,6 @@ public final class InternalTestCluster extends TestCluster {
                   builder.put(ScriptService.SCRIPT_CACHE_EXPIRE_SETTING.getKey(), TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 750, 10000000)));
               }
       
      -        // always default delayed allocation to 0 to make sure we have tests are not delayed
      -        builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0);
      -
               return builder.build();
           }
       
      @@ -590,7 +578,7 @@ public final class InternalTestCluster extends TestCluster {
                       .put(Environment.PATH_HOME_SETTING.getKey(), baseDir) // allow overriding path.home
                       .put(settings)
                       .put("node.name", name)
      -                .put(InternalClusterService.NODE_ID_SEED_SETTING.getKey(), seed)
      +                .put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), seed)
                       .build();
               MockNode node = new MockNode(finalSettings, version, plugins);
               return new NodeAndClient(name, node);
      @@ -838,8 +826,8 @@ public final class InternalTestCluster extends TestCluster {
                           IOUtils.rm(nodeEnv.nodeDataPaths());
                       }
                   }
      -            final long newIdSeed = InternalClusterService.NODE_ID_SEED_SETTING.get(node.settings()) + 1; // use a new seed to make sure we have new node id
      -            Settings finalSettings = Settings.builder().put(node.settings()).put(newSettings).put(InternalClusterService.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build();
      +            final long newIdSeed = DiscoveryNodeService.NODE_ID_SEED_SETTING.get(node.settings()) + 1; // use a new seed to make sure we have new node id
      +            Settings finalSettings = Settings.builder().put(node.settings()).put(newSettings).put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build();
             Collection<Class<? extends Plugin>> plugins = node.getPlugins();
                   Version version = node.getVersion();
                   node = new MockNode(finalSettings, version, plugins);
      @@ -1700,7 +1688,7 @@ public final class InternalTestCluster extends TestCluster {
               }
           }
       
      -    synchronized String routingKeyForShard(String index, String type, int shard, Random random) {
      +    synchronized String routingKeyForShard(Index index, String type, int shard, Random random) {
               assertThat(shard, greaterThanOrEqualTo(0));
               for (NodeAndClient n : nodes.values()) {
      @@ -1713,7 +1701,7 @@ public final class InternalTestCluster extends TestCluster {
                       OperationRouting operationRouting = getInstanceFromNode(OperationRouting.class, node);
                       while (true) {
                           String routing = RandomStrings.randomAsciiOfLength(random, 10);
      -                    final int targetShard = operationRouting.indexShards(clusterService.state(), index, type, null, routing).shardId().getId();
      +                    final int targetShard = operationRouting.indexShards(clusterService.state(), index.getName(), type, null, routing).shardId().getId();
                           if (shard == targetShard) {
                               return routing;
                           }
      @@ -1852,7 +1840,7 @@ public final class InternalTestCluster extends TestCluster {
                       }
       
                       NodeService nodeService = getInstanceFromNode(NodeService.class, nodeAndClient.node);
      -                NodeStats stats = nodeService.stats(CommonStatsFlags.ALL, false, false, false, false, false, false, false, false, false, false);
      +                NodeStats stats = nodeService.stats(CommonStatsFlags.ALL, false, false, false, false, false, false, false, false, false, false, false);
                       assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0L));
                       assertThat("Query cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0L));
                       assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0L));
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java
      index 13f533a583e..f17fe024f14 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java
      @@ -22,6 +22,7 @@ import org.elasticsearch.cluster.routing.ShardRouting;
       import org.elasticsearch.common.Nullable;
       import org.elasticsearch.common.inject.Module;
       import org.elasticsearch.common.settings.Setting;
      +import org.elasticsearch.common.settings.Setting.Property;
       import org.elasticsearch.common.settings.Settings;
       import org.elasticsearch.common.settings.SettingsModule;
       import org.elasticsearch.index.Index;
      @@ -63,7 +64,7 @@ public final class MockIndexEventListener {
               /**
                * For tests to pass in to fail on listener invocation
                */
-        public static final Setting<Boolean> INDEX_FAIL = Setting.boolSetting("index.fail", false, false, Setting.Scope.INDEX);
+        public static final Setting<Boolean> INDEX_FAIL = Setting.boolSetting("index.fail", false, Property.IndexScope);
               public void onModule(SettingsModule module) {
                   module.registerSetting(INDEX_FAIL);
               }
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
      index 2c8a4c7e458..891353ef589 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
      @@ -41,6 +41,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService;
       import org.elasticsearch.index.mapper.MappedFieldType;
       import org.elasticsearch.index.mapper.MapperService;
       import org.elasticsearch.index.mapper.object.ObjectMapper;
      +import org.elasticsearch.index.percolator.PercolatorQueryCache;
       import org.elasticsearch.index.query.ParsedQuery;
       import org.elasticsearch.index.query.QueryShardContext;
       import org.elasticsearch.index.shard.IndexShard;
      @@ -78,6 +79,7 @@ public class TestSearchContext extends SearchContext {
           final IndexService indexService;
           final IndexFieldDataService indexFieldDataService;
           final BitsetFilterCache fixedBitSetFilterCache;
      +    final PercolatorQueryCache percolatorQueryCache;
           final ThreadPool threadPool;
     final Map<Class<?>, Collector> queryCollectors = new HashMap<>();
           final IndexShard indexShard;
      @@ -105,6 +107,7 @@ public class TestSearchContext extends SearchContext {
               this.indexService = indexService;
               this.indexFieldDataService = indexService.fieldData();
               this.fixedBitSetFilterCache = indexService.cache().bitsetFilterCache();
      +        this.percolatorQueryCache = indexService.cache().getPercolatorQueryCache();
               this.threadPool = threadPool;
               this.indexShard = indexService.getShardOrNull(0);
               this.scriptService = scriptService;
      @@ -119,6 +122,7 @@ public class TestSearchContext extends SearchContext {
               this.indexFieldDataService = null;
               this.threadPool = null;
               this.fixedBitSetFilterCache = null;
      +        this.percolatorQueryCache = null;
               this.indexShard = null;
               scriptService = null;
               this.queryShardContext = queryShardContext;
      @@ -330,6 +334,11 @@ public class TestSearchContext extends SearchContext {
               return indexFieldDataService;
           }
       
      +    @Override
      +    public PercolatorQueryCache percolatorQueryCache() {
      +        return percolatorQueryCache;
      +    }
      +
           @Override
           public long timeoutInMillis() {
               return 0;
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java b/test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java
      deleted file mode 100644
      index 99ba809c144..00000000000
      --- a/test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java
      +++ /dev/null
      @@ -1,190 +0,0 @@
      -/*
      - * Licensed to Elasticsearch under one or more contributor
      - * license agreements. See the NOTICE file distributed with
      - * this work for additional information regarding copyright
      - * ownership. Elasticsearch licenses this file to you under
      - * the Apache License, Version 2.0 (the "License"); you may
      - * not use this file except in compliance with the License.
      - * You may obtain a copy of the License at
      - *
      - *    http://www.apache.org/licenses/LICENSE-2.0
      - *
      - * Unless required by applicable law or agreed to in writing,
      - * software distributed under the License is distributed on an
      - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
      - * KIND, either express or implied.  See the License for the
      - * specific language governing permissions and limitations
      - * under the License.
      - */
      -package org.elasticsearch.test.cluster;
      -
      -import org.elasticsearch.Version;
      -import org.elasticsearch.cluster.ClusterName;
      -import org.elasticsearch.cluster.ClusterService;
      -import org.elasticsearch.cluster.ClusterState;
      -import org.elasticsearch.cluster.ClusterStateListener;
      -import org.elasticsearch.cluster.ClusterStateTaskConfig;
      -import org.elasticsearch.cluster.ClusterStateTaskExecutor;
      -import org.elasticsearch.cluster.ClusterStateTaskListener;
      -import org.elasticsearch.cluster.ClusterStateUpdateTask;
      -import org.elasticsearch.cluster.LocalNodeMasterListener;
      -import org.elasticsearch.cluster.TimeoutClusterStateListener;
      -import org.elasticsearch.cluster.block.ClusterBlock;
      -import org.elasticsearch.cluster.node.DiscoveryNode;
      -import org.elasticsearch.cluster.node.DiscoveryNodes;
      -import org.elasticsearch.cluster.routing.OperationRouting;
      -import org.elasticsearch.cluster.service.PendingClusterTask;
      -import org.elasticsearch.common.component.Lifecycle;
      -import org.elasticsearch.common.component.LifecycleListener;
      -import org.elasticsearch.common.transport.DummyTransportAddress;
      -import org.elasticsearch.common.unit.TimeValue;
      -import org.elasticsearch.tasks.TaskManager;
      -
      -import java.util.List;
      -
      -public class NoopClusterService implements ClusterService {
      -
      -    final ClusterState state;
      -
      -    public NoopClusterService() {
      -        this(ClusterState.builder(new ClusterName("noop")).build());
      -    }
      -
      -    public NoopClusterService(ClusterState state) {
      -        if (state.getNodes().size() == 0) {
      -            state = ClusterState.builder(state).nodes(
      -                    DiscoveryNodes.builder()
      -                            .put(new DiscoveryNode("noop_id", DummyTransportAddress.INSTANCE, Version.CURRENT))
      -                            .localNodeId("noop_id")).build();
      -        }
      -
      -        assert state.getNodes().localNode() != null;
      -        this.state = state;
      -
      -    }
      -
      -    @Override
      -    public DiscoveryNode localNode() {
      -        return state.getNodes().localNode();
      -    }
      -
      -    @Override
      -    public ClusterState state() {
      -        return state;
      -    }
      -
      -    @Override
      -    public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
      -
      -    }
      -
      -    @Override
      -    public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
      -
      -    }
      -
      -    @Override
      -    public void removeInitialStateBlock(int blockId) throws IllegalStateException {
      -
      -    }
      -
      -    @Override
      -    public OperationRouting operationRouting() {
      -        return null;
      -    }
      -
      -    @Override
      -    public void addFirst(ClusterStateListener listener) {
      -
      -    }
      -
      -    @Override
      -    public void addLast(ClusterStateListener listener) {
      -
      -    }
      -
      -    @Override
      -    public void add(ClusterStateListener listener) {
      -
      -    }
      -
      -    @Override
      -    public void remove(ClusterStateListener listener) {
      -
      -    }
      -
      -    @Override
      -    public void add(LocalNodeMasterListener listener) {
      -
      -    }
      -
      -    @Override
      -    public void remove(LocalNodeMasterListener listener) {
      -
      -    }
      -
      -    @Override
      -    public void add(TimeValue timeout, TimeoutClusterStateListener listener) {
      -
      -    }
      -
      -    @Override
      -    public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) {
      -
      -    }
      -
      -    @Override
-    public <T> void submitStateUpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor<T> executor, ClusterStateTaskListener listener) {
      -
      -    }
      -
      -    @Override
-    public List<PendingClusterTask> pendingTasks() {
      -        return null;
      -    }
      -
      -    @Override
      -    public int numberOfPendingTasks() {
      -        return 0;
      -    }
      -
      -    @Override
      -    public TimeValue getMaxTaskWaitTime() {
      -        return TimeValue.timeValueMillis(0);
      -    }
      -
      -    @Override
      -    public TaskManager getTaskManager() {
      -        return null;
      -    }
      -
      -    @Override
      -    public Lifecycle.State lifecycleState() {
      -        return null;
      -    }
      -
      -    @Override
      -    public void addLifecycleListener(LifecycleListener listener) {
      -
      -    }
      -
      -    @Override
      -    public void removeLifecycleListener(LifecycleListener listener) {
      -
      -    }
      -
      -    @Override
      -    public ClusterService start() {
      -        return null;
      -    }
      -
      -    @Override
      -    public ClusterService stop() {
      -        return null;
      -    }
      -
      -    @Override
      -    public void close() {
      -
      -    }
      -}
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java b/test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java
      deleted file mode 100644
      index 3b1082cae44..00000000000
      --- a/test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java
      +++ /dev/null
      @@ -1,315 +0,0 @@
      -/*
      - * Licensed to Elasticsearch under one or more contributor
      - * license agreements. See the NOTICE file distributed with
      - * this work for additional information regarding copyright
      - * ownership. Elasticsearch licenses this file to you under
      - * the Apache License, Version 2.0 (the "License"); you may
      - * not use this file except in compliance with the License.
      - * You may obtain a copy of the License at
      - *
      - *    http://www.apache.org/licenses/LICENSE-2.0
      - *
      - * Unless required by applicable law or agreed to in writing,
      - * software distributed under the License is distributed on an
      - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
      - * KIND, either express or implied.  See the License for the
      - * specific language governing permissions and limitations
      - * under the License.
      - */
      -package org.elasticsearch.test.cluster;
      -
      -import org.elasticsearch.ElasticsearchException;
      -import org.elasticsearch.Version;
      -import org.elasticsearch.cluster.ClusterChangedEvent;
      -import org.elasticsearch.cluster.ClusterName;
      -import org.elasticsearch.cluster.ClusterService;
      -import org.elasticsearch.cluster.ClusterState;
      -import org.elasticsearch.cluster.ClusterStateListener;
      -import org.elasticsearch.cluster.ClusterStateTaskConfig;
      -import org.elasticsearch.cluster.ClusterStateTaskExecutor;
      -import org.elasticsearch.cluster.ClusterStateTaskListener;
      -import org.elasticsearch.cluster.ClusterStateUpdateTask;
      -import org.elasticsearch.cluster.LocalNodeMasterListener;
      -import org.elasticsearch.cluster.TimeoutClusterStateListener;
      -import org.elasticsearch.cluster.block.ClusterBlock;
      -import org.elasticsearch.cluster.node.DiscoveryNode;
      -import org.elasticsearch.cluster.node.DiscoveryNodes;
      -import org.elasticsearch.cluster.routing.OperationRouting;
      -import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
      -import org.elasticsearch.cluster.service.PendingClusterTask;
      -import org.elasticsearch.common.Nullable;
      -import org.elasticsearch.common.component.Lifecycle;
      -import org.elasticsearch.common.component.LifecycleListener;
      -import org.elasticsearch.common.logging.ESLogger;
      -import org.elasticsearch.common.logging.Loggers;
      -import org.elasticsearch.common.settings.Settings;
      -import org.elasticsearch.common.transport.DummyTransportAddress;
      -import org.elasticsearch.common.unit.TimeValue;
      -import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
      -import org.elasticsearch.common.util.concurrent.FutureUtils;
      -import org.elasticsearch.tasks.TaskManager;
      -import org.elasticsearch.threadpool.ThreadPool;
      -import org.elasticsearch.transport.TransportService;
      -
      -import java.util.Arrays;
      -import java.util.Iterator;
      -import java.util.List;
      -import java.util.Queue;
      -import java.util.concurrent.CopyOnWriteArrayList;
      -import java.util.concurrent.ScheduledFuture;
      -
      -/** a class that simulate simple cluster service features, like state storage and listeners */
      -public class TestClusterService implements ClusterService {
      -
      -    volatile ClusterState state;
      -    private volatile TaskManager taskManager;
-    private final List<ClusterStateListener> listeners = new CopyOnWriteArrayList<>();
-    private final Queue<NotifyTimeout> onGoingTimeouts = ConcurrentCollections.newQueue();
      -    private final ThreadPool threadPool;
      -    private final ESLogger logger = Loggers.getLogger(getClass(), Settings.EMPTY);
      -    private final OperationRouting operationRouting = new OperationRouting(Settings.Builder.EMPTY_SETTINGS, new AwarenessAllocationDecider());
      -
      -    public TestClusterService() {
      -        this(ClusterState.builder(new ClusterName("test")).build());
      -    }
      -
      -    public TestClusterService(ThreadPool threadPool) {
      -        this(ClusterState.builder(new ClusterName("test")).build(), threadPool);
      -        taskManager = new TaskManager(Settings.EMPTY);
      -    }
      -
      -    public TestClusterService(ThreadPool threadPool, TransportService transportService) {
      -        this(ClusterState.builder(new ClusterName("test")).build(), threadPool);
      -        taskManager = transportService.getTaskManager();
      -    }
      -
      -    public TestClusterService(ClusterState state) {
      -        this(state, null);
      -    }
      -
      -    public TestClusterService(ClusterState state, @Nullable ThreadPool threadPool) {
      -        if (state.getNodes().size() == 0) {
      -            state = ClusterState.builder(state).nodes(
      -                    DiscoveryNodes.builder()
      -                            .put(new DiscoveryNode("test_node", DummyTransportAddress.INSTANCE, Version.CURRENT))
      -                            .localNodeId("test_node")).build();
      -        }
      -
      -        assert state.getNodes().localNode() != null;
      -        this.state = state;
      -        this.threadPool = threadPool;
      -
      -    }
      -
      -
      -    /** set the current state and trigger any registered listeners about the change, mimicking an update task */
      -    synchronized public ClusterState setState(ClusterState state) {
      -        assert state.getNodes().localNode() != null;
      -        // make sure we have a version increment
      -        state = ClusterState.builder(state).version(this.state.version() + 1).build();
      -        return setStateAndNotifyListeners(state);
      -    }
      -
      -    private ClusterState setStateAndNotifyListeners(ClusterState state) {
      -        ClusterChangedEvent event = new ClusterChangedEvent("test", state, this.state);
      -        this.state = state;
      -        for (ClusterStateListener listener : listeners) {
      -            listener.clusterChanged(event);
      -        }
      -        return state;
      -    }
      -
      -    /** set the current state and trigger any registered listeners about the change */
      -    public ClusterState setState(ClusterState.Builder state) {
      -        return setState(state.build());
      -    }
      -
      -    @Override
      -    public DiscoveryNode localNode() {
      -        return state.getNodes().localNode();
      -    }
      -
      -    @Override
      -    public ClusterState state() {
      -        return state;
      -    }
      -
      -    @Override
      -    public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
      -        throw new UnsupportedOperationException();
      -
      -    }
      -
      -    @Override
      -    public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
      -        throw new UnsupportedOperationException();
      -    }
      -
      -    @Override
      -    public void removeInitialStateBlock(int blockId) throws IllegalStateException {
      -        throw new UnsupportedOperationException();
      -    }
      -
      -    @Override
      -    public OperationRouting operationRouting() {
      -        return operationRouting;
      -    }
      -
      -    @Override
      -    public void addFirst(ClusterStateListener listener) {
      -        listeners.add(0, listener);
      -    }
      -
      -    @Override
      -    public void addLast(ClusterStateListener listener) {
      -        listeners.add(listener);
      -    }
      -
      -    @Override
      -    public void add(ClusterStateListener listener) {
      -        listeners.add(listener);
      -    }
      -
      -    @Override
      -    public void remove(ClusterStateListener listener) {
      -        listeners.remove(listener);
-        for (Iterator<NotifyTimeout> it = onGoingTimeouts.iterator(); it.hasNext(); ) {
      -            NotifyTimeout timeout = it.next();
      -            if (timeout.listener.equals(listener)) {
      -                timeout.cancel();
      -                it.remove();
      -            }
      -        }
      -    }
      -
      -    @Override
      -    public void add(LocalNodeMasterListener listener) {
      -        throw new UnsupportedOperationException();
      -    }
      -
      -    @Override
      -    public void remove(LocalNodeMasterListener listener) {
      -        throw new UnsupportedOperationException();
      -    }
      -
      -    @Override
      -    public void add(final TimeValue timeout, final TimeoutClusterStateListener listener) {
      -        if (threadPool == null) {
      -            throw new UnsupportedOperationException("TestClusterService wasn't initialized with a thread pool");
      -        }
      -        if (timeout != null) {
      -            NotifyTimeout notifyTimeout = new NotifyTimeout(listener, timeout);
      -            notifyTimeout.future = threadPool.schedule(timeout, ThreadPool.Names.GENERIC, notifyTimeout);
      -            onGoingTimeouts.add(notifyTimeout);
      -        }
      -        listeners.add(listener);
      -        listener.postAdded();
      -    }
      -
      -    @Override
      -    public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) {
      -        submitStateUpdateTask(source, null, updateTask, updateTask, updateTask);
      -    }
      -
      -    @Override
-    synchronized public <T> void submitStateUpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor<T> executor, ClusterStateTaskListener listener) {
      -        logger.debug("processing [{}]", source);
      -        if (state().nodes().localNodeMaster() == false && executor.runOnlyOnMaster()) {
      -            listener.onNoLongerMaster(source);
      -            logger.debug("failed [{}], no longer master", source);
      -            return;
      -        }
-        ClusterStateTaskExecutor.BatchResult<T> batchResult;
      -        ClusterState previousClusterState = state;
      -        try {
      -            batchResult = executor.execute(previousClusterState, Arrays.asList(task));
      -        } catch (Exception e) {
-            batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder().failure(task, e).build(previousClusterState);
      -        }
      -
      -        batchResult.executionResults.get(task).handle(
      -                () -> {},
      -                ex -> listener.onFailure(source, new ElasticsearchException("failed to process cluster state update task [" + source + "]", ex))
      -        );
      -
      -        setStateAndNotifyListeners(batchResult.resultingState);
      -        listener.clusterStateProcessed(source, previousClusterState, batchResult.resultingState);
      -        logger.debug("finished [{}]", source);
      -
      -    }
      -
      -    @Override
      -    public TimeValue getMaxTaskWaitTime() {
      -        throw new UnsupportedOperationException();
      -    }
      -
      -    @Override
      -    public TaskManager getTaskManager() {
      -        return taskManager;
      -    }
      -
      -    @Override
-    public List<PendingClusterTask> pendingTasks() {
      -        throw new UnsupportedOperationException();
      -
      -    }
      -
      -    @Override
      -    public int numberOfPendingTasks() {
      -        throw new UnsupportedOperationException();
      -    }
      -
      -    @Override
      -    public Lifecycle.State lifecycleState() {
      -        throw new UnsupportedOperationException();
      -    }
      -
      -    @Override
      -    public void addLifecycleListener(LifecycleListener listener) {
      -        throw new UnsupportedOperationException();
      -    }
      -
      -    @Override
      -    public void removeLifecycleListener(LifecycleListener listener) {
      -        throw new UnsupportedOperationException();
      -    }
      -
      -    @Override
      -    public ClusterService start() throws ElasticsearchException {
      -        throw new UnsupportedOperationException();
      -    }
      -
      -    @Override
      -    public ClusterService stop() throws ElasticsearchException {
      -        throw new UnsupportedOperationException();
      -    }
      -
      -    @Override
      -    public void close() throws ElasticsearchException {
      -        throw new UnsupportedOperationException();
      -    }
      -
      -    class NotifyTimeout implements Runnable {
      -        final TimeoutClusterStateListener listener;
      -        final TimeValue timeout;
      -        volatile ScheduledFuture future;
      -
      -        NotifyTimeout(TimeoutClusterStateListener listener, TimeValue timeout) {
      -            this.listener = listener;
      -            this.timeout = timeout;
      -        }
      -
      -        public void cancel() {
      -            FutureUtils.cancel(future);
      -        }
      -
      -        @Override
      -        public void run() {
      -            if (future != null && future.isCancelled()) {
      -                return;
      -            }
      -            listener.onTimeout(this.timeout);
      -            // note, we rely on the listener to remove itself in case of timeout if needed
      -        }
      -    }
      -}
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java
      index e318843e84f..cbcb9766943 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java
      @@ -18,9 +18,9 @@
        */
       package org.elasticsearch.test.disruption;
       
      -import org.elasticsearch.cluster.ClusterService;
       import org.elasticsearch.cluster.ClusterState;
       import org.elasticsearch.cluster.ClusterStateUpdateTask;
      +import org.elasticsearch.cluster.service.ClusterService;
       import org.elasticsearch.common.Priority;
       import org.elasticsearch.common.unit.TimeValue;
       import org.elasticsearch.test.InternalTestCluster;
      @@ -56,7 +56,7 @@ public class BlockClusterStateProcessing extends SingleNodeDisruption {
               }
               logger.info("delaying cluster state updates on node [{}]", disruptionNodeCopy);
               boolean success = disruptionLatch.compareAndSet(null, new CountDownLatch(1));
      -        assert success : "startDisrupting called without waiting on stopDistrupting to complete";
      +        assert success : "startDisrupting called without waiting on stopDisrupting to complete";
               final CountDownLatch started = new CountDownLatch(1);
               clusterService.submitStateUpdateTask("service_disruption_block", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
       
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java
      index b9c663686b1..be0b69a8e8b 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java
      @@ -18,9 +18,9 @@
        */
       package org.elasticsearch.test.disruption;
       
      -import org.elasticsearch.cluster.ClusterService;
       import org.elasticsearch.cluster.ClusterState;
       import org.elasticsearch.cluster.ClusterStateUpdateTask;
      +import org.elasticsearch.cluster.service.ClusterService;
       import org.elasticsearch.common.Priority;
       import org.elasticsearch.common.unit.TimeValue;
       import org.elasticsearch.test.InternalTestCluster;
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
      index ddccfe88e38..bf32b6b8575 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
      @@ -31,6 +31,7 @@ import org.elasticsearch.ElasticsearchException;
       import org.elasticsearch.common.logging.ESLogger;
       import org.elasticsearch.common.logging.Loggers;
       import org.elasticsearch.common.settings.Setting;
      +import org.elasticsearch.common.settings.Setting.Property;
       import org.elasticsearch.common.settings.Settings;
       import org.elasticsearch.index.engine.Engine;
       import org.elasticsearch.index.engine.EngineConfig;
      @@ -55,11 +56,13 @@ public final class MockEngineSupport {
            * Allows tests to wrap an index reader randomly with a given ratio. This is disabled by default ie. 0.0d since reader wrapping is insanely
            * slow if {@link org.apache.lucene.index.AssertingDirectoryReader} is used.
            */
-    public static final Setting<Double> WRAP_READER_RATIO = Setting.doubleSetting("index.engine.mock.random.wrap_reader_ratio", 0.0d, 0.0d, false, Setting.Scope.INDEX);
+    public static final Setting<Double> WRAP_READER_RATIO =
+        Setting.doubleSetting("index.engine.mock.random.wrap_reader_ratio", 0.0d, 0.0d, Property.IndexScope);
           /**
            * Allows tests to prevent an engine from being flushed on close ie. to test translog recovery...
            */
-    public static final Setting<Boolean> DISABLE_FLUSH_ON_CLOSE = Setting.boolSetting("index.mock.disable_flush_on_close", false, false, Setting.Scope.INDEX);
+    public static final Setting<Boolean> DISABLE_FLUSH_ON_CLOSE =
+        Setting.boolSetting("index.mock.disable_flush_on_close", false, Property.IndexScope);
       
       
           private final AtomicBoolean closing = new AtomicBoolean(false);
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
      index cb3bbc7436b..7adf6d2b4b9 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
      @@ -144,7 +144,7 @@ public class ElasticsearchAssertions {
           }
       
           /**
      -     * Checks that all shard requests of a replicated brodcast request failed due to a cluster block
      +     * Checks that all shard requests of a replicated broadcast request failed due to a cluster block
            *
            * @param replicatedBroadcastResponse the response that should only contain failed shard responses
            *
      @@ -716,7 +716,7 @@ public class ElasticsearchAssertions {
       
           /**
            * Applies basic assertions on the SearchResponse. This method checks if all shards were successful, if
      -     * any of the shards threw an exception and if the response is serializeable.
      +     * any of the shards threw an exception and if the response is serializable.
            */
           public static SearchResponse assertSearchResponse(SearchRequestBuilder request) {
               return assertSearchResponse(request.get());
      @@ -724,7 +724,7 @@ public class ElasticsearchAssertions {
       
           /**
            * Applies basic assertions on the SearchResponse. This method checks if all shards were successful, if
      -     * any of the shards threw an exception and if the response is serializeable.
      +     * any of the shards threw an exception and if the response is serializable.
            */
           public static SearchResponse assertSearchResponse(SearchResponse response) {
               assertNoFailures(response);
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
      index 5684717342d..fbc518b136d 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
      @@ -19,14 +19,34 @@
       
       package org.elasticsearch.test.rest;
       
      -import com.carrotsearch.randomizedtesting.RandomizedTest;
      +import java.io.IOException;
      +import java.io.InputStream;
      +import java.net.URI;
      +import java.net.URISyntaxException;
      +import java.net.URL;
      +import java.nio.file.FileSystem;
      +import java.nio.file.FileSystems;
      +import java.nio.file.Files;
      +import java.nio.file.Path;
      +import java.nio.file.StandardCopyOption;
      +import java.util.ArrayList;
      +import java.util.Collections;
      +import java.util.Comparator;
      +import java.util.HashMap;
      +import java.util.HashSet;
      +import java.util.List;
      +import java.util.Map;
      +import java.util.Set;
      +
       import org.apache.lucene.util.IOUtils;
      +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
       import org.elasticsearch.common.Strings;
       import org.elasticsearch.common.SuppressForbidden;
       import org.elasticsearch.common.settings.Settings;
       import org.elasticsearch.common.xcontent.XContentHelper;
       import org.elasticsearch.test.ESTestCase;
       import org.elasticsearch.test.rest.client.RestException;
      +import org.elasticsearch.test.rest.client.RestResponse;
       import org.elasticsearch.test.rest.parser.RestTestParseException;
       import org.elasticsearch.test.rest.parser.RestTestSuiteParser;
       import org.elasticsearch.test.rest.section.DoSection;
      @@ -42,24 +62,11 @@ import org.junit.AfterClass;
       import org.junit.Before;
       import org.junit.BeforeClass;
       
      -import java.io.IOException;
      -import java.io.InputStream;
      -import java.net.InetSocketAddress;
      -import java.net.URI;
      -import java.net.URISyntaxException;
      -import java.net.URL;
      -import java.nio.file.FileSystem;
      -import java.nio.file.FileSystems;
      -import java.nio.file.Files;
      -import java.nio.file.Path;
      -import java.nio.file.StandardCopyOption;
      -import java.util.ArrayList;
      -import java.util.Collections;
      -import java.util.Comparator;
      -import java.util.HashMap;
      -import java.util.List;
      -import java.util.Map;
      -import java.util.Set;
      +import com.carrotsearch.randomizedtesting.RandomizedTest;
      +
      +import static java.util.Collections.emptyList;
      +import static java.util.Collections.emptyMap;
      +import static java.util.Collections.sort;
       
       /**
        * Runs the clients test suite against an elasticsearch cluster.
      @@ -261,7 +268,6 @@ public abstract class ESRestTestCase extends ESTestCase {
       
           @After
           public void wipeCluster() throws Exception {
      -
               // wipe indices
         Map<String, String> deleteIndicesArgs = new HashMap<>();
               deleteIndicesArgs.put("index", "*");
      @@ -285,6 +291,30 @@ public abstract class ESRestTestCase extends ESTestCase {
               adminExecutionContext.callApi("snapshot.delete_repository", deleteSnapshotsArgs, Collections.emptyList(), Collections.emptyMap());
           }
       
      +    /**
+     * Logs a message if there are still running tasks. The reasoning is that any tasks still running are state that is trying to bleed into
      +     * other tests.
      +     */
      +    @After
      +    public void logIfThereAreRunningTasks() throws InterruptedException, IOException, RestException {
      +        RestResponse tasks = adminExecutionContext.callApi("tasks.list", emptyMap(), emptyList(), emptyMap());
+        Set<String> runningTasks = runningTasks(tasks);
+        // Ignore the task list API - it doesn't count against us
      +        runningTasks.remove(ListTasksAction.NAME);
      +        runningTasks.remove(ListTasksAction.NAME + "[n]");
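+        // (the "[n]" suffix denotes the per-node child tasks spawned by the top-level list request)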
      +        if (runningTasks.isEmpty()) {
      +            return;
      +        }
+        List<String> stillRunning = new ArrayList<>(runningTasks);
      +        sort(stillRunning);
      +        logger.info("There are still tasks running after this test that might break subsequent tests {}.", stillRunning);
      +        /*
      +         * This isn't a higher level log or outright failure because some of these tasks are run by the cluster in the background. If we
+         * could determine that some tasks are run by the user, we'd fail the tests if those tasks were running and ignore any background
      +         * tasks.
      +         */
      +    }
      +
           @AfterClass
           public static void close() {
               if (restTestExecutionContext != null) {
      @@ -365,4 +395,19 @@ public abstract class ESRestTestCase extends ESTestCase {
                   executableSection.execute(restTestExecutionContext);
               }
           }
      +
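+    // Collects the action name of every running task from a tasks.list response, which is
+    // shaped like {"nodes": {<node_id>: {"tasks": {<task_id>: {"action": ...}}}}}.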
      +    @SuppressWarnings("unchecked")
+    public Set<String> runningTasks(RestResponse response) throws IOException {
+        Set<String> runningTasks = new HashSet<>();
+        Map<String, Object> nodes = (Map<String, Object>) response.evaluate("nodes");
+        for (Map.Entry<String, Object> node : nodes.entrySet()) {
+            Map<String, Object> nodeInfo = (Map<String, Object>) node.getValue();
+            Map<String, Object> nodeTasks = (Map<String, Object>) nodeInfo.get("tasks");
+            for (Map.Entry<String, Object> taskAndName : nodeTasks.entrySet()) {
+                Map<String, Object> task = (Map<String, Object>) taskAndName.getValue();
      +                runningTasks.add(task.get("action").toString());
      +            }
      +        }
      +        return runningTasks;
      +    }
       }
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java
      index e5597713570..e798fd8c8ab 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java
      @@ -151,8 +151,7 @@ public class RestClient implements Closeable {
       
               HttpRequestBuilder httpRequestBuilder = callApiBuilder(apiName, requestParams, body);
         for (Map.Entry<String, String> header : headers.entrySet()) {
      -            logger.error("Adding header " + header.getKey());
      -            logger.error(" with value " + header.getValue());
      +            logger.error("Adding header {}\n with value {}", header.getKey(), header.getValue());
                   httpRequestBuilder.addHeader(header.getKey(), header.getValue());
               }
               logger.debug("calling api [{}]", apiName);
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
      index 6a484e9ae69..79f7502fb27 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
      @@ -114,9 +114,10 @@ public class HttpRequestBuilder {
               for (String pathPart : path) {
                   try {
                       finalPath.append('/');
      -                URI uri = new URI(null, null, null, -1, pathPart, null, null);
      +                // We append "/" to the path part to handle parts that start with - or other invalid characters
      +                URI uri = new URI(null, null, null, -1, "/" + pathPart, null, null);
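+                // getRawPath() gives back the encoded path; the "/" prepended above is dropped again via substring(1)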
                       //manually escape any slash that each part may contain
      -                finalPath.append(uri.getRawPath().replaceAll("/", "%2F"));
      +                finalPath.append(uri.getRawPath().substring(1).replaceAll("/", "%2F"));
                   } catch(URISyntaxException e) {
                       throw new RuntimeException("unable to build uri", e);
                   }
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
      index 9945edbefa9..37fc163ac61 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
      @@ -61,7 +61,7 @@ public class HttpResponse {
                       try {
                           httpResponse.close();
                       } catch (IOException e) {
      -                    logger.error(e.getMessage(), e);
      +                    logger.error("Failed closing response", e);
                       }
                   }
               } else {
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
      index ef3be122cdb..7d17746d54f 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
      @@ -23,7 +23,6 @@ import com.carrotsearch.randomizedtesting.SeedUtils;
       import com.carrotsearch.randomizedtesting.generators.RandomPicks;
       
       import org.apache.lucene.index.CheckIndex;
      -import org.apache.lucene.index.IndexWriter;
       import org.apache.lucene.store.BaseDirectoryWrapper;
       import org.apache.lucene.store.Directory;
       import org.apache.lucene.store.LockFactory;
      @@ -32,13 +31,13 @@ import org.apache.lucene.store.MockDirectoryWrapper;
       import org.apache.lucene.store.StoreRateLimiting;
       import org.apache.lucene.util.LuceneTestCase;
       import org.apache.lucene.util.TestRuleMarkFailure;
      -import org.elasticsearch.cluster.metadata.AliasOrIndex;
       import org.elasticsearch.cluster.metadata.IndexMetaData;
       import org.elasticsearch.common.inject.Inject;
       import org.elasticsearch.common.io.stream.BytesStreamOutput;
       import org.elasticsearch.common.logging.ESLogger;
       import org.elasticsearch.common.lucene.Lucene;
       import org.elasticsearch.common.settings.Setting;
      +import org.elasticsearch.common.settings.Setting.Property;
       import org.elasticsearch.common.settings.Settings;
       import org.elasticsearch.index.IndexModule;
       import org.elasticsearch.index.IndexSettings;
      @@ -57,17 +56,20 @@ import java.io.PrintStream;
       import java.nio.charset.StandardCharsets;
       import java.nio.file.Path;
       import java.util.Arrays;
      -import java.util.Collections;
       import java.util.Random;
      -import java.util.Set;
       
       public class MockFSDirectoryService extends FsDirectoryService {
       
-    public static final Setting<Double> RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING = Setting.doubleSetting("index.store.mock.random.io_exception_rate_on_open", 0.0d,  0.0d, false, Setting.Scope.INDEX);
-    public static final Setting<Double> RANDOM_IO_EXCEPTION_RATE_SETTING = Setting.doubleSetting("index.store.mock.random.io_exception_rate", 0.0d,  0.0d, false, Setting.Scope.INDEX);
-    public static final Setting<Boolean> RANDOM_PREVENT_DOUBLE_WRITE_SETTING = Setting.boolSetting("index.store.mock.random.prevent_double_write", true, false, Setting.Scope.INDEX);// true is default in MDW
-    public static final Setting<Boolean> RANDOM_NO_DELETE_OPEN_FILE_SETTING = Setting.boolSetting("index.store.mock.random.no_delete_open_file", true, false, Setting.Scope.INDEX);// true is default in MDW
-    public static final Setting<Boolean> CRASH_INDEX_SETTING = Setting.boolSetting("index.store.mock.random.crash_index", true, false, Setting.Scope.INDEX);// true is default in MDW
+    public static final Setting<Double> RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING =
+        Setting.doubleSetting("index.store.mock.random.io_exception_rate_on_open", 0.0d,  0.0d, Property.IndexScope, Property.NodeScope);
+    public static final Setting<Double> RANDOM_IO_EXCEPTION_RATE_SETTING =
+        Setting.doubleSetting("index.store.mock.random.io_exception_rate", 0.0d,  0.0d, Property.IndexScope, Property.NodeScope);
+    public static final Setting<Boolean> RANDOM_PREVENT_DOUBLE_WRITE_SETTING =
+        Setting.boolSetting("index.store.mock.random.prevent_double_write", true, Property.IndexScope, Property.NodeScope);// true is default in MDW
+    public static final Setting<Boolean> RANDOM_NO_DELETE_OPEN_FILE_SETTING =
+        Setting.boolSetting("index.store.mock.random.no_delete_open_file", true, Property.IndexScope, Property.NodeScope);// true is default in MDW
+    public static final Setting<Boolean> CRASH_INDEX_SETTING =
+        Setting.boolSetting("index.store.mock.random.crash_index", true, Property.IndexScope, Property.NodeScope);// true is default in MDW
       
           private final FsDirectoryService delegateService;
           private final Random random;
      @@ -173,8 +175,7 @@ public class MockFSDirectoryService extends FsDirectoryService {
               w.setCheckIndexOnClose(false); // we do this on the index level
               w.setPreventDoubleWrite(preventDoubleWrite);
               // TODO: make this test robust to virus scanner
      -        w.setEnableVirusScanner(false);
      -        w.setNoDeleteOpenFile(noDeleteOpenFile);
      +        w.setAssertNoDeleteOpenFile(false);
               w.setUseSlowOpenClosers(false);
               LuceneTestCase.closeAfterSuite(new CloseableDirectory(w));
               return w;
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
      index 80251d54951..d44cf60e9e3 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
      @@ -23,6 +23,7 @@ import org.elasticsearch.common.Nullable;
       import org.elasticsearch.common.logging.ESLogger;
       import org.elasticsearch.common.logging.Loggers;
       import org.elasticsearch.common.settings.Setting;
      +import org.elasticsearch.common.settings.Setting.Property;
       import org.elasticsearch.common.settings.Settings;
       import org.elasticsearch.common.settings.SettingsModule;
       import org.elasticsearch.index.IndexModule;
      @@ -44,7 +45,8 @@ import java.util.Map;
       
       public class MockFSIndexStore extends IndexStore {
       
-    public static final Setting<Boolean> INDEX_CHECK_INDEX_ON_CLOSE_SETTING = Setting.boolSetting("index.store.mock.check_index_on_close", true, false, Setting.Scope.INDEX);
+    public static final Setting<Boolean> INDEX_CHECK_INDEX_ON_CLOSE_SETTING =
+        Setting.boolSetting("index.store.mock.check_index_on_close", true, Property.IndexScope, Property.NodeScope);
       
           public static class TestPlugin extends Plugin {
               @Override
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java
      index 4c48f990d6a..fc090e151a3 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java
      @@ -20,6 +20,7 @@
       package org.elasticsearch.test.tasks;
       
       import org.elasticsearch.common.settings.Setting;
      +import org.elasticsearch.common.settings.Setting.Property;
       import org.elasticsearch.common.settings.Settings;
       import org.elasticsearch.tasks.Task;
       import org.elasticsearch.tasks.TaskManager;
      @@ -33,7 +34,8 @@ import java.util.concurrent.CopyOnWriteArrayList;
        */
       public class MockTaskManager extends TaskManager {
       
-    public static final Setting<Boolean> USE_MOCK_TASK_MANAGER_SETTING = Setting.boolSetting("tests.mock.taskmanager.enabled", false, false, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> USE_MOCK_TASK_MANAGER_SETTING =
+        Setting.boolSetting("tests.mock.taskmanager.enabled", false, Property.NodeScope);
       
     private final Collection<MockTaskManagerListener> listeners = new CopyOnWriteArrayList<>();
       
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
      index fb310239155..322882a7b3c 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
      @@ -25,6 +25,7 @@ import org.elasticsearch.common.inject.Inject;
       import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
       import org.elasticsearch.common.network.NetworkModule;
       import org.elasticsearch.common.settings.Setting;
      +import org.elasticsearch.common.settings.Setting.Property;
       import org.elasticsearch.common.settings.Settings;
       import org.elasticsearch.common.settings.SettingsModule;
       import org.elasticsearch.plugins.Plugin;
      @@ -67,10 +68,12 @@ public class AssertingLocalTransport extends LocalTransport {
               }
           }
       
-    public static final Setting<Version> ASSERTING_TRANSPORT_MIN_VERSION_KEY = new Setting<>("transport.asserting.version.min",
-            Integer.toString(Version.CURRENT.minimumCompatibilityVersion().id), (s) -> Version.fromId(Integer.parseInt(s)), false, Setting.Scope.CLUSTER);
-    public static final Setting<Version> ASSERTING_TRANSPORT_MAX_VERSION_KEY = new Setting<>("transport.asserting.version.max",
-        Integer.toString(Version.CURRENT.id), (s) -> Version.fromId(Integer.parseInt(s)), false, Setting.Scope.CLUSTER);
+    public static final Setting<Version> ASSERTING_TRANSPORT_MIN_VERSION_KEY =
+        new Setting<>("transport.asserting.version.min", Integer.toString(Version.CURRENT.minimumCompatibilityVersion().id),
+            (s) -> Version.fromId(Integer.parseInt(s)), Property.NodeScope);
+    public static final Setting<Version> ASSERTING_TRANSPORT_MAX_VERSION_KEY =
+        new Setting<>("transport.asserting.version.max", Integer.toString(Version.CURRENT.id),
+            (s) -> Version.fromId(Integer.parseInt(s)), Property.NodeScope);
           private final Random random;
           private final Version minVersion;
           private final Version maxVersion;
      diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
      index 03745183240..f5fd5123847 100644
      --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
      +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
      @@ -99,22 +99,22 @@ public class MockTransportService extends TransportService {
           public static MockTransportService local(Settings settings, Version version, ThreadPool threadPool) {
               NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
               Transport transport = new LocalTransport(settings, threadPool, version, namedWriteableRegistry);
      -        return new MockTransportService(settings, transport, threadPool, namedWriteableRegistry);
      +        return new MockTransportService(settings, transport, threadPool);
           }
       
           public static MockTransportService nettyFromThreadPool(Settings settings, Version version, ThreadPool threadPool) {
               NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
               Transport transport = new NettyTransport(settings, threadPool, new NetworkService(settings), BigArrays.NON_RECYCLING_INSTANCE,
                       version, namedWriteableRegistry);
      -        return new MockTransportService(Settings.EMPTY, transport, threadPool, namedWriteableRegistry);
      +        return new MockTransportService(Settings.EMPTY, transport, threadPool);
           }
       
       
           private final Transport original;
       
           @Inject
      -    public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool, NamedWriteableRegistry namedWriteableRegistry) {
      -        super(settings, new LookupTestTransport(transport), threadPool, namedWriteableRegistry);
      +    public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool) {
      +        super(settings, new LookupTestTransport(transport), threadPool);
               this.original = transport;
           }
       
      diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java
      index e15b62147cf..298f230d64a 100644
      --- a/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java
      +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java
      @@ -74,7 +74,7 @@ public class RestTestParserTests extends ESTestCase {
                               "\"Get type mapping - pre 1.0\":\n" +
                               "\n" +
                               "  - skip:\n" +
      -                        "      version:     \"0.90.9 - \"\n" +
      +                        "      version:     \"2.0.0 - \"\n" +
                               "      reason:      \"for newer versions the index name is always returned\"\n" +
                               "\n" +
                               "  - do:\n" +
      @@ -121,7 +121,7 @@ public class RestTestParserTests extends ESTestCase {
               assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Get type mapping - pre 1.0"));
               assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false));
               assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getReason(), equalTo("for newer versions the index name is always returned"));
      -        assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), equalTo(Version.V_0_90_9));
      +        assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), equalTo(Version.V_2_0_0));
               assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getUpperVersion(), equalTo(Version.CURRENT));
               assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3));
               assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class));
      diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java
      index 9dd388056d5..b3fe1f0f23b 100644
      --- a/test/framework/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java
      +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java
      @@ -57,7 +57,7 @@ public class SetupSectionParserTests extends AbstractParserTestCase {
           public void testParseSetupAndSkipSectionNoSkip() throws Exception {
               parser = YamlXContent.yamlXContent.createParser(
                       "  - skip:\n" +
      -                        "      version:  \"0.90.0 - 0.90.7\"\n" +
      +                        "      version:  \"2.0.0 - 2.3.0\"\n" +
                               "      reason:   \"Update doesn't return metadata fields, waiting for #3259\"\n" +
                               "  - do:\n" +
                               "      index1:\n" +
      @@ -79,8 +79,8 @@ public class SetupSectionParserTests extends AbstractParserTestCase {
               assertThat(setupSection, notNullValue());
               assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false));
               assertThat(setupSection.getSkipSection(), notNullValue());
      -        assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_0_90_0));
      -        assertThat(setupSection.getSkipSection().getUpperVersion(), equalTo(Version.V_0_90_7));
      +        assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_2_0_0));
      +        assertThat(setupSection.getSkipSection().getUpperVersion(), equalTo(Version.V_2_3_0));
               assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
               assertThat(setupSection.getDoSections().size(), equalTo(2));
               assertThat(setupSection.getDoSections().get(0).getApiCallSection().getApi(), equalTo("index1"));
      diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java
      index 5864e78134d..39b0f284b5e 100644
      --- a/test/framework/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java
      +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java
      @@ -34,7 +34,7 @@ import static org.hamcrest.Matchers.nullValue;
       public class SkipSectionParserTests extends AbstractParserTestCase {
           public void testParseSkipSectionVersionNoFeature() throws Exception {
               parser = YamlXContent.yamlXContent.createParser(
      -                "version:     \" - 0.90.2\"\n" +
      +                "version:     \" - 2.1.0\"\n" +
                       "reason:      Delete ignores the parent param"
               );
       
      @@ -44,7 +44,7 @@ public class SkipSectionParserTests extends AbstractParserTestCase {
       
               assertThat(skipSection, notNullValue());
               assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion()));
      -        assertThat(skipSection.getUpperVersion(), equalTo(Version.V_0_90_2));
      +        assertThat(skipSection.getUpperVersion(), equalTo(Version.V_2_1_0));
               assertThat(skipSection.getFeatures().size(), equalTo(0));
               assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param"));
           }
      @@ -144,4 +144,4 @@ public class SkipSectionParserTests extends AbstractParserTestCase {
                   assertThat(e.getMessage(), is("version or features is mandatory within skip section"));
               }
           }
      -}
      \ No newline at end of file
      +}
      diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java
      index c157610b645..d034ae56a71 100644
      --- a/test/framework/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java
      +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java
      @@ -70,7 +70,7 @@ public class TestSectionParserTests extends AbstractParserTestCase {
               String yaml =
                       "\"First test section\": \n" +
                               "  - skip:\n" +
      -                        "      version:  \"0.90.0 - 0.90.7\"\n" +
      +                        "      version:  \"2.0.0 - 2.2.0\"\n" +
                               "      reason:   \"Update doesn't return metadata fields, waiting for #3259\"\n" +
                               "  - do :\n" +
                               "      catch: missing\n" +
      @@ -87,8 +87,8 @@ public class TestSectionParserTests extends AbstractParserTestCase {
               assertThat(testSection, notNullValue());
               assertThat(testSection.getName(), equalTo("First test section"));
               assertThat(testSection.getSkipSection(), notNullValue());
      -        assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_0_90_0));
      -        assertThat(testSection.getSkipSection().getUpperVersion(), equalTo(Version.V_0_90_7));
      +        assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_2_0_0));
      +        assertThat(testSection.getSkipSection().getUpperVersion(), equalTo(Version.V_2_2_0));
               assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
               assertThat(testSection.getExecutableSections().size(), equalTo(2));
               DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
      diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java
      index cc2f613eb27..ea1929a55b0 100644
      --- a/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java
      +++ b/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java
      @@ -32,7 +32,7 @@ public class VersionUtilsTests extends ESTestCase {
                   assertTrue(allVersions.get(i).before(allVersions.get(j)));
               }
           }
      -    
      +
           public void testRandomVersionBetween() {
               // full range
               Version got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), Version.CURRENT);
      @@ -46,34 +46,34 @@ public class VersionUtilsTests extends ESTestCase {
               assertTrue(got.onOrBefore(Version.CURRENT));
       
               // sub range
      -        got = VersionUtils.randomVersionBetween(random(), Version.V_0_90_12, Version.V_1_4_5);
      -        assertTrue(got.onOrAfter(Version.V_0_90_12));
      -        assertTrue(got.onOrBefore(Version.V_1_4_5));
      +        got = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0);
      +        assertTrue(got.onOrAfter(Version.V_2_0_0));
      +        assertTrue(got.onOrBefore(Version.V_5_0_0));
       
               // unbounded lower
      -        got = VersionUtils.randomVersionBetween(random(), null, Version.V_1_4_5);
      +        got = VersionUtils.randomVersionBetween(random(), null, Version.V_5_0_0);
               assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
      -        assertTrue(got.onOrBefore(Version.V_1_4_5));
      +        assertTrue(got.onOrBefore(Version.V_5_0_0));
               got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.allVersions().get(0));
               assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
               assertTrue(got.onOrBefore(VersionUtils.allVersions().get(0)));
       
               // unbounded upper
      -        got = VersionUtils.randomVersionBetween(random(), Version.V_0_90_12, null);
      -        assertTrue(got.onOrAfter(Version.V_0_90_12));
      +        got = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, null);
      +        assertTrue(got.onOrAfter(Version.V_2_0_0));
               assertTrue(got.onOrBefore(Version.CURRENT));
               got = VersionUtils.randomVersionBetween(random(), VersionUtils.getPreviousVersion(), null);
               assertTrue(got.onOrAfter(VersionUtils.getPreviousVersion()));
               assertTrue(got.onOrBefore(Version.CURRENT));
      -        
      +
               // range of one
               got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getFirstVersion());
               assertEquals(got, VersionUtils.getFirstVersion());
               got = VersionUtils.randomVersionBetween(random(), Version.CURRENT, Version.CURRENT);
               assertEquals(got, Version.CURRENT);
      -        got = VersionUtils.randomVersionBetween(random(), Version.V_1_2_4, Version.V_1_2_4);
      -        assertEquals(got, Version.V_1_2_4);
      -        
      +        got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_5_0_0);
      +        assertEquals(got, Version.V_5_0_0);
      +
               // implicit range of one
               got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.getFirstVersion());
               assertEquals(got, VersionUtils.getFirstVersion());
      diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorIndex.java b/test/logger-usage/build.gradle
      similarity index 60%
      rename from core/src/main/java/org/elasticsearch/percolator/PercolatorIndex.java
      rename to test/logger-usage/build.gradle
      index 6f9a7104834..1a5815cf76e 100644
      --- a/core/src/main/java/org/elasticsearch/percolator/PercolatorIndex.java
      +++ b/test/logger-usage/build.gradle
      @@ -1,3 +1,5 @@
      +import org.elasticsearch.gradle.precommit.PrecommitTasks
      +
       /*
        * Licensed to Elasticsearch under one or more contributor
        * license agreements. See the NOTICE file distributed with
      @@ -17,21 +19,15 @@
        * under the License.
        */
       
      -package org.elasticsearch.percolator;
      -
      -import org.elasticsearch.index.mapper.ParsedDocument;
      -
      -/**
      - * Abstraction on how to index the percolator document.
      - */
      -interface PercolatorIndex {
      -
      -    /**
      -     * Indexes the document(s) and initializes the PercolateContext
      -     *
      -     * @param context  Initialized with document related properties for fetch phase.
      -     * @param document Document that is percolated. Can contain several documents.
      -     * */
      -    void prepare(PercolateContext context, ParsedDocument document);
      -
      +dependencies {
      +  compile 'org.ow2.asm:asm-debug-all:5.0.4' // use asm-debug-all as asm-all is broken
      +  testCompile "org.elasticsearch.test:framework:${version}"
       }
      +
      +loggerUsageCheck.enabled = false
      +
      +forbiddenApisMain.enabled = true // disabled by parent project
      +forbiddenApisMain {
      +  signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] // does not depend on core, only jdk signatures
      +}
      +jarHell.enabled = true // disabled by parent project
      \ No newline at end of file
      diff --git a/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java
      new file mode 100644
      index 00000000000..25d4052c162
      --- /dev/null
      +++ b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java
      @@ -0,0 +1,459 @@
      +/*
      + * Licensed to Elasticsearch under one or more contributor
      + * license agreements. See the NOTICE file distributed with
      + * this work for additional information regarding copyright
      + * ownership. Elasticsearch licenses this file to you under
      + * the Apache License, Version 2.0 (the "License"); you may
      + * not use this file except in compliance with the License.
      + * You may obtain a copy of the License at
      + *
      + *    http://www.apache.org/licenses/LICENSE-2.0
      + *
      + * Unless required by applicable law or agreed to in writing,
      + * software distributed under the License is distributed on an
      + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
      + * KIND, either express or implied.  See the License for the
      + * specific language governing permissions and limitations
      + * under the License.
      + */
      +
      +package org.elasticsearch.test.loggerusage;
      +
      +import org.objectweb.asm.AnnotationVisitor;
      +import org.objectweb.asm.ClassReader;
      +import org.objectweb.asm.ClassVisitor;
      +import org.objectweb.asm.MethodVisitor;
      +import org.objectweb.asm.Opcodes;
      +import org.objectweb.asm.Type;
      +import org.objectweb.asm.tree.AbstractInsnNode;
      +import org.objectweb.asm.tree.IntInsnNode;
      +import org.objectweb.asm.tree.LdcInsnNode;
      +import org.objectweb.asm.tree.LineNumberNode;
      +import org.objectweb.asm.tree.MethodInsnNode;
      +import org.objectweb.asm.tree.MethodNode;
      +import org.objectweb.asm.tree.TypeInsnNode;
      +import org.objectweb.asm.tree.analysis.Analyzer;
      +import org.objectweb.asm.tree.analysis.AnalyzerException;
      +import org.objectweb.asm.tree.analysis.BasicInterpreter;
      +import org.objectweb.asm.tree.analysis.BasicValue;
      +import org.objectweb.asm.tree.analysis.Frame;
      +
      +import java.io.IOException;
      +import java.io.InputStream;
      +import java.nio.file.FileVisitResult;
      +import java.nio.file.Files;
      +import java.nio.file.Path;
      +import java.nio.file.Paths;
      +import java.nio.file.SimpleFileVisitor;
      +import java.nio.file.attribute.BasicFileAttributes;
      +import java.util.Arrays;
      +import java.util.List;
      +import java.util.function.Consumer;
      +import java.util.function.Predicate;
      +
      +public class ESLoggerUsageChecker {
      +    public static final String LOGGER_CLASS = "org.elasticsearch.common.logging.ESLogger";
      +    public static final String THROWABLE_CLASS = "java.lang.Throwable";
      +    public static final List<String> LOGGER_METHODS = Arrays.asList("trace", "debug", "info", "warn", "error");
      +    public static final String IGNORE_CHECKS_ANNOTATION = "org.elasticsearch.common.SuppressLoggerChecks";
      +
      +    @SuppressForbidden(reason = "command line tool")
      +    public static void main(String... args) throws Exception {
      +        System.out.println("checking for wrong usages of ESLogger...");
      +        boolean[] wrongUsageFound = new boolean[1];
      +        checkLoggerUsage(wrongLoggerUsage -> {
      +            System.err.println(wrongLoggerUsage.getErrorLines());
      +            wrongUsageFound[0] = true;
      +        }, args);
      +        if (wrongUsageFound[0]) {
      +            throw new Exception("Wrong logger usages found");
      +        } else {
      +            System.out.println("No wrong usages found");
      +        }
      +    }
      +
      +    private static void checkLoggerUsage(Consumer<WrongLoggerUsage> wrongUsageCallback, String... classDirectories)
      +        throws IOException {
      +        for (String classDirectory : classDirectories) {
      +            Path root = Paths.get(classDirectory);
      +            if (Files.isDirectory(root) == false) {
      +                throw new IllegalArgumentException(root + " should be an existing directory");
      +            }
      +            Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
      +                @Override
      +                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
      +                    if (Files.isRegularFile(file) && file.getFileName().toString().endsWith(".class")) {
      +                        try (InputStream in = Files.newInputStream(file)) {
      +                            ESLoggerUsageChecker.check(wrongUsageCallback, in);
      +                        }
      +                    }
      +                    return super.visitFile(file, attrs);
      +                }
      +            });
      +        }
      +    }
      +
      +    public static void check(Consumer<WrongLoggerUsage> wrongUsageCallback, InputStream inputStream) throws IOException {
      +        check(wrongUsageCallback, inputStream, s -> true);
      +    }
      +
      +    // used by tests
      +    static void check(Consumer<WrongLoggerUsage> wrongUsageCallback, InputStream inputStream, Predicate<String> methodsToCheck)
      +        throws IOException {
      +        ClassReader cr = new ClassReader(inputStream);
      +        cr.accept(new ClassChecker(wrongUsageCallback, methodsToCheck), 0);
      +    }
      +
      +    public static class WrongLoggerUsage {
      +        private final String className;
      +        private final String methodName;
      +        private final String logMethodName;
      +        private final int line;
      +        private final String errorMessage;
      +
      +        public WrongLoggerUsage(String className, String methodName, String logMethodName, int line, String errorMessage) {
      +            this.className = className;
      +            this.methodName = methodName;
      +            this.logMethodName = logMethodName;
      +            this.line = line;
      +            this.errorMessage = errorMessage;
      +        }
      +
      +        @Override
      +        public String toString() {
      +            return "WrongLoggerUsage{" +
      +                "className='" + className + '\'' +
      +                ", methodName='" + methodName + '\'' +
      +                ", logMethodName='" + logMethodName + '\'' +
      +                ", line=" + line +
      +                ", errorMessage='" + errorMessage + '\'' +
      +                '}';
      +        }
      +
      +        /**
      +         * Returns an error message that has the form of stack traces emitted by {@link Throwable#printStackTrace}
      +         */
      +        public String getErrorLines() {
      +            String fullClassName = Type.getObjectType(className).getClassName();
      +            String simpleClassName = fullClassName.substring(fullClassName.lastIndexOf('.') + 1);
      +            int innerClassIndex = simpleClassName.indexOf("$");
      +            if (innerClassIndex > 0) {
      +                simpleClassName = simpleClassName.substring(0, innerClassIndex);
      +            }
      +            simpleClassName = simpleClassName + ".java";
      +            StringBuilder sb = new StringBuilder();
      +            sb.append("Bad usage of ");
      +            sb.append(LOGGER_CLASS).append("#").append(logMethodName);
      +            sb.append(": ");
      +            sb.append(errorMessage);
      +            sb.append("\n\tat ");
      +            sb.append(fullClassName);
      +            sb.append(".");
      +            sb.append(methodName);
      +            sb.append("(");
      +            sb.append(simpleClassName);
      +            sb.append(":");
      +            sb.append(line);
      +            sb.append(")");
      +            return sb.toString();
      +        }
      +    }
      +
      +    private static class ClassChecker extends ClassVisitor {
      +        private String className;
      +        private boolean ignoreChecks;
      +        private final Consumer<WrongLoggerUsage> wrongUsageCallback;
      +        private final Predicate<String> methodsToCheck;
      +
      +        public ClassChecker(Consumer<WrongLoggerUsage> wrongUsageCallback, Predicate<String> methodsToCheck) {
      +            super(Opcodes.ASM5);
      +            this.wrongUsageCallback = wrongUsageCallback;
      +            this.methodsToCheck = methodsToCheck;
      +        }
      +
      +        @Override
      +        public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) {
      +            this.className = name;
      +        }
      +
      +        @Override
      +        public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
      +            if (IGNORE_CHECKS_ANNOTATION.equals(Type.getType(desc).getClassName())) {
      +                ignoreChecks = true;
      +            }
      +            return super.visitAnnotation(desc, visible);
      +        }
      +
      +        @Override
      +        public MethodVisitor visitMethod(int access, String name, String desc, String signature, String[] exceptions) {
      +            if (ignoreChecks == false && methodsToCheck.test(name)) {
      +                return new MethodChecker(this.className, access, name, desc, wrongUsageCallback);
      +            } else {
      +                return super.visitMethod(access, name, desc, signature, exceptions);
      +            }
      +        }
      +    }
      +
      +    private static class MethodChecker extends MethodVisitor {
      +        private final String className;
      +        private final Consumer<WrongLoggerUsage> wrongUsageCallback;
      +        private boolean ignoreChecks;
      +
      +        public MethodChecker(String className, int access, String name, String desc, Consumer<WrongLoggerUsage> wrongUsageCallback) {
      +            super(Opcodes.ASM5, new MethodNode(access, name, desc, null, null));
      +            this.className = className;
      +            this.wrongUsageCallback = wrongUsageCallback;
      +        }
      +
      +        @Override
      +        public AnnotationVisitor visitAnnotation(String desc, boolean visible) {
      +            if (IGNORE_CHECKS_ANNOTATION.equals(Type.getType(desc).getClassName())) {
      +                ignoreChecks = true;
      +            }
      +            return super.visitAnnotation(desc, visible);
      +        }
      +
      +        @Override
      +        public void visitEnd() {
      +            if (ignoreChecks == false) {
      +                findBadLoggerUsages((MethodNode) mv);
      +            }
      +            super.visitEnd();
      +        }
      +
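      +        /**
      +         * Runs two abstract-interpretation passes over the method's bytecode: one tracks how many
      +         * "{}" place holders each string constant on the operand stack contains, the other tracks
      +         * the sizes of Object[] arrays built for the varargs parameter. At each call to one of the
      +         * ESLogger logging methods the two results are compared and a WrongLoggerUsage is reported
      +         * on any mismatch.
      +         */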
      +        public void findBadLoggerUsages(MethodNode methodNode) {
      +            Analyzer<BasicValue> stringPlaceHolderAnalyzer = new Analyzer<>(new PlaceHolderStringInterpreter());
      +            Analyzer<BasicValue> arraySizeAnalyzer = new Analyzer<>(new ArraySizeInterpreter());
      +            try {
      +                stringPlaceHolderAnalyzer.analyze(className, methodNode);
      +                arraySizeAnalyzer.analyze(className, methodNode);
      +            } catch (AnalyzerException e) {
      +                throw new RuntimeException("Internal error: failed in analysis step", e);
      +            }
      +            Frame<BasicValue>[] stringFrames = stringPlaceHolderAnalyzer.getFrames();
      +            Frame<BasicValue>[] arraySizeFrames = arraySizeAnalyzer.getFrames();
      +            AbstractInsnNode[] insns = methodNode.instructions.toArray();
      +            int lineNumber = -1;
      +            for (int i = 0; i < insns.length; i++) {
      +                AbstractInsnNode insn = insns[i];
      +                if (insn instanceof LineNumberNode) {
      +                    LineNumberNode lineNumberNode = (LineNumberNode) insn;
      +                    lineNumber = lineNumberNode.line;
      +                }
      +                if (insn.getOpcode() == Opcodes.INVOKEVIRTUAL) {
      +                    MethodInsnNode methodInsn = (MethodInsnNode) insn;
      +                    if (Type.getObjectType(methodInsn.owner).getClassName().equals(LOGGER_CLASS) == false) {
      +                        continue;
      +                    }
      +                    if (LOGGER_METHODS.contains(methodInsn.name) == false) {
      +                        continue;
      +                    }
      +                    Type[] argumentTypes = Type.getArgumentTypes(methodInsn.desc);
      +                    BasicValue logMessageLengthObject = getStackValue(stringFrames[i], argumentTypes.length - 1); // first argument
      +                    if (logMessageLengthObject instanceof PlaceHolderStringBasicValue == false) {
      +                        wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
      +                            "First argument must be a string constant so that we can statically ensure proper place holder usage"));
      +                        continue;
      +                    }
      +                    PlaceHolderStringBasicValue logMessageLength = (PlaceHolderStringBasicValue) logMessageLengthObject;
      +                    if (logMessageLength.minValue != logMessageLength.maxValue) {
      +                        wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
      +                            "Multiple log messages with conflicting number of place holders"));
      +                        continue;
      +                    }
      +                    BasicValue varArgsSizeObject = getStackValue(arraySizeFrames[i], 0); // last argument
      +                    if (varArgsSizeObject instanceof ArraySizeBasicValue == false) {
      +                        wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
      +                            "Could not determine size of varargs array"));
      +                        continue;
      +                    }
      +                    ArraySizeBasicValue varArgsSize = (ArraySizeBasicValue) varArgsSizeObject;
      +                    if (varArgsSize.minValue != varArgsSize.maxValue) {
      +                        wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
      +                            "Multiple parameter arrays with conflicting sizes"));
      +                        continue;
      +                    }
      +                    assert logMessageLength.minValue == logMessageLength.maxValue && varArgsSize.minValue == varArgsSize.maxValue;
      +                    if (logMessageLength.minValue != varArgsSize.minValue) {
      +                        wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber,
      +                            "Expected " + logMessageLength.minValue + " arguments but got " + varArgsSize.minValue));
      +                        continue;
      +                    }
      +                }
      +            }
      +        }
      +    }
      +
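      +    // Counts "{}" pairs in a message, stepping past each match so overlapping braces are not
      +    // double-counted (e.g. "{}{}" yields 2, "{{}" yields 1).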
      +    private static int calculateNumberOfPlaceHolders(String message) {
      +        int count = 0;
      +        for (int i = 1; i < message.length(); i++) {
      +            if (message.charAt(i - 1) == '{' && message.charAt(i) == '}') {
      +                count++;
      +                i += 1;
      +            }
      +        }
      +        return count;
      +    }
      +
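      +    // Returns the value `index` slots below the top of the operand stack in the given frame, or
      +    // null if the stack is not that deep; used to pick individual call arguments out of a frame.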
      +    private static BasicValue getStackValue(Frame<BasicValue> f, int index) {
      +        int top = f.getStackSize() - 1;
      +        return index <= top ? f.getStack(top - index) : null;
      +    }
      +
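      +    // A BasicValue that carries an integer interval [minValue, maxValue], so control-flow merges
      +    // can widen the interval instead of discarding the tracked constant.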
      +    private static class IntMinMaxTrackingBasicValue extends BasicValue {
      +        protected final int minValue;
      +        protected final int maxValue;
      +
      +        public IntMinMaxTrackingBasicValue(Type type, int value) {
      +            super(type);
      +            this.minValue = value;
      +            this.maxValue = value;
      +        }
      +
      +        public IntMinMaxTrackingBasicValue(Type type, int minValue, int maxValue) {
      +            super(type);
      +            this.minValue = minValue;
      +            this.maxValue = maxValue;
      +        }
      +
      +        @Override
      +        public boolean equals(Object o) {
      +            if (this == o) return true;
      +            if (o == null || getClass() != o.getClass()) return false;
      +            if (!super.equals(o)) return false;
      +
      +            IntMinMaxTrackingBasicValue that = (IntMinMaxTrackingBasicValue) o;
      +
      +            if (minValue != that.minValue) return false;
      +            return maxValue == that.maxValue;
      +
      +        }
      +
      +        @Override
      +        public int hashCode() {
      +            int result = super.hashCode();
      +            result = 31 * result + minValue;
      +            result = 31 * result + maxValue;
      +            return result;
      +        }
      +
      +        @Override
      +        public String toString() {
      +            return "IntMinMaxTrackingBasicValue{" +
      +                "minValue=" + minValue +
      +                ", maxValue=" + maxValue +
      +                '}';
      +        }
      +    }
      +
      +    private static final class PlaceHolderStringBasicValue extends IntMinMaxTrackingBasicValue {
      +        public static final Type STRING_OBJECT_TYPE = Type.getObjectType("java/lang/String");
      +
      +        public PlaceHolderStringBasicValue(int placeHolders) {
      +            super(STRING_OBJECT_TYPE, placeHolders);
      +        }
      +
      +        public PlaceHolderStringBasicValue(int minPlaceHolders, int maxPlaceHolders) {
      +            super(STRING_OBJECT_TYPE, minPlaceHolders, maxPlaceHolders);
      +        }
      +    }
      +
      +    private static final class ArraySizeBasicValue extends IntMinMaxTrackingBasicValue {
      +        public ArraySizeBasicValue(Type type, int minArraySize, int maxArraySize) {
      +            super(type, minArraySize, maxArraySize);
      +        }
      +    }
      +
      +    private static final class IntegerConstantBasicValue extends IntMinMaxTrackingBasicValue {
      +        public IntegerConstantBasicValue(Type type, int constant) {
      +            super(type, constant);
      +        }
      +
      +        public IntegerConstantBasicValue(Type type, int minConstant, int maxConstant) {
      +            super(type, minConstant, maxConstant);
      +        }
      +    }
      +
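      +    // Maps each LDC of a string constant to the number of "{}" place holders it contains; when
      +    // branches disagree, merge() widens the result to the [min, max] interval.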
      +    private static final class PlaceHolderStringInterpreter extends BasicInterpreter {
      +        @Override
      +        public BasicValue newOperation(AbstractInsnNode insnNode) throws AnalyzerException {
      +            if (insnNode.getOpcode() == Opcodes.LDC) {
      +                Object constant = ((LdcInsnNode) insnNode).cst;
      +                if (constant instanceof String) {
      +                    return new PlaceHolderStringBasicValue(calculateNumberOfPlaceHolders((String) constant));
      +                }
      +            }
      +            return super.newOperation(insnNode);
      +        }
      +
      +        @Override
      +        public BasicValue merge(BasicValue value1, BasicValue value2) {
      +            if (value1 instanceof PlaceHolderStringBasicValue && value2 instanceof PlaceHolderStringBasicValue
      +                && value1.equals(value2) == false) {
      +                PlaceHolderStringBasicValue c1 = (PlaceHolderStringBasicValue) value1;
      +                PlaceHolderStringBasicValue c2 = (PlaceHolderStringBasicValue) value2;
      +                return new PlaceHolderStringBasicValue(Math.min(c1.minValue, c2.minValue), Math.max(c1.maxValue, c2.maxValue));
      +            }
      +            return super.merge(value1, value2);
      +        }
      +    }
      +
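      +    // Tracks integer constants (ICONST_*, BIPUSH, SIPUSH, LDC) so that ANEWARRAY of a tracked
      +    // constant becomes an ArraySizeBasicValue; AASTORE returns the array value unchanged, so the
      +    // size survives the element stores that populate the varargs array.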
      +    private static final class ArraySizeInterpreter extends BasicInterpreter {
      +        @Override
      +        public BasicValue newOperation(AbstractInsnNode insnNode) throws AnalyzerException {
      +            switch (insnNode.getOpcode()) {
      +                case ICONST_0: return new IntegerConstantBasicValue(Type.INT_TYPE, 0);
      +                case ICONST_1: return new IntegerConstantBasicValue(Type.INT_TYPE, 1);
      +                case ICONST_2: return new IntegerConstantBasicValue(Type.INT_TYPE, 2);
      +                case ICONST_3: return new IntegerConstantBasicValue(Type.INT_TYPE, 3);
      +                case ICONST_4: return new IntegerConstantBasicValue(Type.INT_TYPE, 4);
      +                case ICONST_5: return new IntegerConstantBasicValue(Type.INT_TYPE, 5);
      +                case BIPUSH:
      +                case SIPUSH: return new IntegerConstantBasicValue(Type.INT_TYPE, ((IntInsnNode)insnNode).operand);
      +                case Opcodes.LDC: {
      +                    Object constant = ((LdcInsnNode)insnNode).cst;
      +                    if (constant instanceof Integer) {
      +                        return new IntegerConstantBasicValue(Type.INT_TYPE, (Integer)constant);
      +                    } else {
      +                        return super.newOperation(insnNode);
      +                    }
      +                }
      +                default: return super.newOperation(insnNode);
      +            }
      +        }
      +
      +        @Override
      +        public BasicValue merge(BasicValue value1, BasicValue value2) {
      +            if (value1 instanceof IntegerConstantBasicValue && value2 instanceof IntegerConstantBasicValue) {
      +                IntegerConstantBasicValue c1 = (IntegerConstantBasicValue) value1;
      +                IntegerConstantBasicValue c2 = (IntegerConstantBasicValue) value2;
      +                return new IntegerConstantBasicValue(Type.INT_TYPE, Math.min(c1.minValue, c2.minValue), Math.max(c1.maxValue, c2.maxValue));
      +            } else if (value1 instanceof ArraySizeBasicValue && value2 instanceof ArraySizeBasicValue) {
      +                ArraySizeBasicValue c1 = (ArraySizeBasicValue) value1;
      +                ArraySizeBasicValue c2 = (ArraySizeBasicValue) value2;
      +                return new ArraySizeBasicValue(Type.INT_TYPE, Math.min(c1.minValue, c2.minValue), Math.max(c1.maxValue, c2.maxValue));
      +            }
      +            return super.merge(value1, value2);
      +        }
      +
      +        @Override
      +        public BasicValue unaryOperation(AbstractInsnNode insnNode, BasicValue value) throws AnalyzerException {
      +            if (insnNode.getOpcode() == Opcodes.ANEWARRAY && value instanceof IntegerConstantBasicValue) {
      +                IntegerConstantBasicValue constantBasicValue = (IntegerConstantBasicValue) value;
      +                String desc = ((TypeInsnNode) insnNode).desc;
      +                return new ArraySizeBasicValue(Type.getType("[" + Type.getObjectType(desc)), constantBasicValue.minValue,
      +                    constantBasicValue.maxValue);
      +            }
      +            return super.unaryOperation(insnNode, value);
      +        }
      +
      +        @Override
      +        public BasicValue ternaryOperation(AbstractInsnNode insnNode, BasicValue value1, BasicValue value2, BasicValue value3)
      +            throws AnalyzerException {
      +            if (insnNode.getOpcode() == Opcodes.AASTORE && value1 instanceof ArraySizeBasicValue) {
      +                return value1;
      +            }
      +            return super.ternaryOperation(insnNode, value1, value2, value3);
      +        }
      +    }
      +}
      diff --git a/core/src/main/java/org/apache/lucene/index/memory/ExtendedMemoryIndex.java b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/SuppressForbidden.java
      similarity index 58%
      rename from core/src/main/java/org/apache/lucene/index/memory/ExtendedMemoryIndex.java
      rename to test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/SuppressForbidden.java
      index aec1bc75519..995269e9f02 100644
      --- a/core/src/main/java/org/apache/lucene/index/memory/ExtendedMemoryIndex.java
      +++ b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/SuppressForbidden.java
      @@ -17,15 +17,19 @@
        * under the License.
        */
       
      -package org.apache.lucene.index.memory;
      +package org.elasticsearch.test.loggerusage;
      +
      +import java.lang.annotation.ElementType;
      +import java.lang.annotation.Retention;
      +import java.lang.annotation.RetentionPolicy;
      +import java.lang.annotation.Target;
       
       /**
      - * This class overwrites {@link MemoryIndex} to make the reuse constructor visible.
      + * Annotation to suppress forbidden-apis errors inside a whole class, a method, or a field.
      + * Duplicated from core, as the main sources of the logger-usage project have no dependency on core.
        */
      -public final class ExtendedMemoryIndex extends MemoryIndex {
      -
      -    public ExtendedMemoryIndex(boolean storeOffsets, boolean storePayloads, long maxReusedBytes) {
      -        super(storeOffsets, storePayloads, maxReusedBytes);
      -    }
      -
      +@Retention(RetentionPolicy.CLASS)
      +@Target({ ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE })
      +public @interface SuppressForbidden {
      +    String reason();
       }
      diff --git a/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java
      new file mode 100644
      index 00000000000..ab07ecbf45e
      --- /dev/null
      +++ b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java
      @@ -0,0 +1,165 @@
      +/*
      + * Licensed to Elasticsearch under one or more contributor
      + * license agreements. See the NOTICE file distributed with
      + * this work for additional information regarding copyright
      + * ownership. Elasticsearch licenses this file to you under
      + * the Apache License, Version 2.0 (the "License"); you may
      + * not use this file except in compliance with the License.
      + * You may obtain a copy of the License at
      + *
      + *    http://www.apache.org/licenses/LICENSE-2.0
      + *
      + * Unless required by applicable law or agreed to in writing,
      + * software distributed under the License is distributed on an
      + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
      + * KIND, either express or implied.  See the License for the
      + * specific language governing permissions and limitations
      + * under the License.
      + */
      +
      +package org.elasticsearch.test.loggerusage;
      +
      +import org.elasticsearch.common.SuppressLoggerChecks;
      +import org.elasticsearch.common.logging.ESLogger;
      +import org.elasticsearch.test.loggerusage.ESLoggerUsageChecker.WrongLoggerUsage;
      +import org.elasticsearch.test.ESTestCase;
      +
      +import java.io.IOException;
      +import java.io.InputStream;
      +import java.lang.reflect.Method;
      +import java.util.ArrayList;
      +import java.util.List;
      +import java.util.function.Predicate;
      +
      +import static org.hamcrest.Matchers.equalTo;
      +import static org.hamcrest.Matchers.hasItem;
      +import static org.hamcrest.Matchers.notNullValue;
      +
      +public class ESLoggerUsageTests extends ESTestCase {
      +
      +    public void testLoggerUsageChecks() throws IOException {
      +        for (Method method : getClass().getMethods()) {
      +            if (method.getDeclaringClass().equals(getClass())) {
      +                if (method.getName().startsWith("check")) {
      +                    logger.info("Checking logger usage for method {}", method.getName());
      +                    InputStream classInputStream = getClass().getResourceAsStream(getClass().getSimpleName() + ".class");
      +                    List<WrongLoggerUsage> errors = new ArrayList<>();
      +                    ESLoggerUsageChecker.check(errors::add, classInputStream, Predicate.isEqual(method.getName()));
      +                    if (method.getName().startsWith("checkFail")) {
      +                        assertFalse("Expected " + method.getName() + " to have wrong ESLogger usage", errors.isEmpty());
      +                    } else {
      +                        assertTrue("Method " + method.getName() + " has unexpected ESLogger usage errors: " + errors, errors.isEmpty());
      +                    }
      +                } else {
      +                    assertTrue("only allow methods starting with test or check in this class", method.getName().startsWith("test"));
      +                }
      +            }
      +        }
      +    }
      +
      +    public void testLoggerUsageCheckerCompatibilityWithESLogger() throws NoSuchMethodException {
      +        assertThat(ESLoggerUsageChecker.LOGGER_CLASS, equalTo(ESLogger.class.getName()));
      +        assertThat(ESLoggerUsageChecker.THROWABLE_CLASS, equalTo(Throwable.class.getName()));
      +        int varargsMethodCount = 0;
      +        for (Method method : ESLogger.class.getMethods()) {
      +            if (method.isVarArgs()) {
      +                // check that logger usage checks all varargs methods
      +                assertThat(ESLoggerUsageChecker.LOGGER_METHODS, hasItem(method.getName()));
      +                varargsMethodCount++;
      +            }
      +        }
      +        // currently we have two overloaded methods for each of debug, info, ...
      +        // if that changes, we might want to have another look at the usage checker
      +        assertThat(varargsMethodCount, equalTo(ESLoggerUsageChecker.LOGGER_METHODS.size() * 2));
      +
      +        // check that signature is same as we expect in the usage checker
      +        for (String methodName : ESLoggerUsageChecker.LOGGER_METHODS) {
      +            assertThat(ESLogger.class.getMethod(methodName, String.class, Object[].class), notNullValue());
      +            assertThat(ESLogger.class.getMethod(methodName, String.class, Throwable.class, Object[].class), notNullValue());
      +        }
      +    }
      +
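      +    // The check* methods below are scanned reflectively by testLoggerUsageChecks() above: methods
      +    // named "checkFail..." must produce a wrong-usage error, all other "check..." methods must pass.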
      +    public void checkNumberOfArguments1() {
      +        logger.info("Hello {}", "world");
      +    }
      +
      +    public void checkFailNumberOfArguments1() {
      +        logger.info("Hello {}");
      +    }
      +
      +    @SuppressLoggerChecks(reason = "test ignore functionality")
      +    public void checkIgnoreWhenAnnotationPresent() {
      +        logger.info("Hello {}");
      +    }
      +
      +    public void checkNumberOfArguments2() {
      +        logger.info("Hello {}, {}, {}", "world", 2, "third argument");
      +    }
      +
      +    public void checkFailNumberOfArguments2() {
      +        logger.info("Hello {}, {}", "world", 2, "third argument");
      +    }
      +
      +    public void checkNumberOfArguments3() {
      +        // long argument list (> 5), emits different bytecode
      +        logger.info("Hello {}, {}, {}, {}, {}, {}, {}", "world", 2, "third argument", 4, 5, 6, new String("last arg"));
      +    }
      +
      +    public void checkFailNumberOfArguments3() {
      +        logger.info("Hello {}, {}, {}, {}, {}, {}, {}", "world", 2, "third argument", 4, 5, 6, 7, new String("last arg"));
      +    }
      +
      +    public void checkOrderOfExceptionArgument() {
      +        logger.info("Hello", new Exception());
      +    }
      +
      +    public void checkOrderOfExceptionArgument1() {
      +        logger.info("Hello {}", new Exception(), "world");
      +    }
      +
      +    public void checkFailOrderOfExceptionArgument1() {
      +        logger.info("Hello {}", "world", new Exception());
      +    }
      +
      +    public void checkOrderOfExceptionArgument2() {
      +        logger.info("Hello {}, {}", new Exception(), "world", 42);
      +    }
      +
      +    public void checkFailOrderOfExceptionArgument2() {
      +        logger.info("Hello {}, {}", "world", 42, new Exception());
      +    }
      +
      +    public void checkFailNonConstantMessage(boolean b) {
      +        logger.info(Boolean.toString(b));
      +    }
      +
      +    public void checkComplexUsage(boolean b) {
      +        String message = "Hello {}, {}";
      +        Object[] args = new Object[] { "world", 42 };
      +        if (b) {
      +            message = "also two args {}{}";
      +            args = new Object[] { "world", 43 };
      +        }
      +        logger.info(message, args);
      +    }
      +
      +    public void checkFailComplexUsage1(boolean b) {
      +        String message = "Hello {}, {}";
      +        Object[] args = new Object[] { "world", 42 };
      +        if (b) {
      +            message = "just one arg {}";
      +            args = new Object[] { "world", 43 };
      +        }
      +        logger.info(message, args);
      +    }
      +
      +    public void checkFailComplexUsage2(boolean b) {
      +        String message = "Hello {}, {}";
      +        Object[] args = new Object[] { "world", 42 };
      +        if (b) {
      +            message = "also two args {}{}";
      +            args = new Object[] { "world", 43, "another argument" };
      +        }
      +        logger.info(message, args);
      +    }
      +}